| { |
| "paper_id": "K17-1012", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T07:07:20.399888Z" |
| }, |
| "title": "Embedding Words and Senses Together via Joint Knowledge-Enhanced Training", |
| "authors": [ |
| { |
| "first": "Massimiliano", |
| "middle": [], |
| "last": "Mancini", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Sapienza University of Rome", |
| "location": {} |
| }, |
| "email": "mancini@dis.uniroma1.it" |
| }, |
| { |
| "first": "Jose", |
| "middle": [], |
| "last": "Camacho-Collados", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Sapienza University of Rome", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Ignacio", |
| "middle": [], |
| "last": "Iacobacci", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Sapienza University of Rome", |
| "location": {} |
| }, |
| "email": "iacobacci@di.uniroma1.it" |
| }, |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Sapienza University of Rome", |
| "location": {} |
| }, |
| "email": "navigli@di.uniroma1.it" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Word embeddings are widely used in Natural Language Processing, mainly due to their success in capturing semantic information from massive corpora. However, their creation process does not allow the different meanings of a word to be automatically separated, as it conflates them into a single vector. We address this issue by proposing a new model which learns word and sense embeddings jointly. Our model exploits large corpora and knowledge from semantic networks in order to produce a unified vector space of word and sense embeddings. We evaluate the main features of our approach both qualitatively and quantitatively in a variety of tasks, highlighting the advantages of the proposed method in comparison to state-of-the-art word- and sense-based models.", |
| "pdf_parse": { |
| "paper_id": "K17-1012", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Word embeddings are widely used in Natural Language Processing, mainly due to their success in capturing semantic information from massive corpora. However, their creation process does not allow the different meanings of a word to be automatically separated, as it conflates them into a single vector. We address this issue by proposing a new model which learns word and sense embeddings jointly. Our model exploits large corpora and knowledge from semantic networks in order to produce a unified vector space of word and sense embeddings. We evaluate the main features of our approach both qualitatively and quantitatively in a variety of tasks, highlighting the advantages of the proposed method in comparison to state-of-the-art word- and sense-based models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Recently, approaches based on neural networks which embed words into low-dimensional vector spaces from text corpora (i.e. word embeddings) have become increasingly popular (Mikolov et al., 2013; Pennington et al., 2014) . Word embeddings have proved to be beneficial in many Natural Language Processing tasks, such as Machine Translation (Zou et al., 2013) , syntactic parsing (Weiss et al., 2015) , and Question Answering (Bordes et al., 2014) , to name a few. Despite their success in capturing semantic properties of words, these representations are generally hampered by an important limitation: the inability to discriminate among different meanings of the same word.", |
| "cite_spans": [ |
| { |
| "start": 173, |
| "end": 195, |
| "text": "(Mikolov et al., 2013;", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 196, |
| "end": 220, |
| "text": "Pennington et al., 2014)", |
| "ref_id": "BIBREF46" |
| }, |
| { |
| "start": 339, |
| "end": 357, |
| "text": "(Zou et al., 2013)", |
| "ref_id": "BIBREF67" |
| }, |
| { |
| "start": 378, |
| "end": 398, |
| "text": "(Weiss et al., 2015)", |
| "ref_id": "BIBREF64" |
| }, |
| { |
| "start": 424, |
| "end": 445, |
| "text": "(Bordes et al., 2014)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Authors marked with an asterisk (*) contributed equally.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Previous works have addressed this limitation by automatically inducing word senses from monolingual corpora (Sch\u00fctze, 1998; Reisinger and Mooney, 2010; Huang et al., 2012; Di Marco and Navigli, 2013; Neelakantan et al., 2014; Tian et al., 2014; Li and Jurafsky, 2015; Vu and Parker, 2016; Qiu et al., 2016) , or bilingual parallel data (Guo et al., 2014; Ettinger et al., 2016; Suster et al., 2016) . However, these approaches learn solely on the basis of statistics extracted from text corpora and do not exploit knowledge from semantic networks. Additionally, their induced senses are neither readily interpretable (Panchenko et al., 2017) nor easily mappable to lexical resources, which limits their application. Recent approaches have utilized semantic networks to inject knowledge into existing word representations (Yu and Dredze, 2014; Faruqui et al., 2015; Goikoetxea et al., 2015; Speer and Lowry-Duda, 2017; Mrksic et al., 2017) , but without solving the meaning conflation issue. In order to obtain a representation for each sense of a word, a number of approaches have leveraged lexical resources to learn sense embeddings as a result of post-processing conventional word embeddings Johansson and Pina, 2015; Rothe and Sch\u00fctze, 2015; Pilehvar and Collier, 2016; Camacho-Collados et al., 2016) .", |
| "cite_spans": [ |
| { |
| "start": 109, |
| "end": 124, |
| "text": "(Sch\u00fctze, 1998;", |
| "ref_id": "BIBREF57" |
| }, |
| { |
| "start": 125, |
| "end": 152, |
| "text": "Reisinger and Mooney, 2010;", |
| "ref_id": "BIBREF53" |
| }, |
| { |
| "start": 153, |
| "end": 172, |
| "text": "Huang et al., 2012;", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 173, |
| "end": 200, |
| "text": "Di Marco and Navigli, 2013;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 201, |
| "end": 226, |
| "text": "Neelakantan et al., 2014;", |
| "ref_id": "BIBREF42" |
| }, |
| { |
| "start": 227, |
| "end": 245, |
| "text": "Tian et al., 2014;", |
| "ref_id": "BIBREF61" |
| }, |
| { |
| "start": 246, |
| "end": 268, |
| "text": "Li and Jurafsky, 2015;", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 269, |
| "end": 289, |
| "text": "Vu and Parker, 2016;", |
| "ref_id": "BIBREF62" |
| }, |
| { |
| "start": 290, |
| "end": 307, |
| "text": "Qiu et al., 2016)", |
| "ref_id": "BIBREF51" |
| }, |
| { |
| "start": 337, |
| "end": 355, |
| "text": "(Guo et al., 2014;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 356, |
| "end": 378, |
| "text": "Ettinger et al., 2016;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 379, |
| "end": 399, |
| "text": "Suster et al., 2016)", |
| "ref_id": null |
| }, |
| { |
| "start": 618, |
| "end": 642, |
| "text": "(Panchenko et al., 2017)", |
| "ref_id": "BIBREF45" |
| }, |
| { |
| "start": 822, |
| "end": 843, |
| "text": "(Yu and Dredze, 2014;", |
| "ref_id": "BIBREF65" |
| }, |
| { |
| "start": 844, |
| "end": 865, |
| "text": "Faruqui et al., 2015;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 866, |
| "end": 890, |
| "text": "Goikoetxea et al., 2015;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 891, |
| "end": 918, |
| "text": "Speer and Lowry-Duda, 2017;", |
| "ref_id": "BIBREF59" |
| }, |
| { |
| "start": 919, |
| "end": 939, |
| "text": "Mrksic et al., 2017)", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 1196, |
| "end": 1221, |
| "text": "Johansson and Pina, 2015;", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 1222, |
| "end": 1246, |
| "text": "Rothe and Sch\u00fctze, 2015;", |
| "ref_id": "BIBREF55" |
| }, |
| { |
| "start": 1247, |
| "end": 1274, |
| "text": "Pilehvar and Collier, 2016;", |
| "ref_id": "BIBREF49" |
| }, |
| { |
| "start": 1275, |
| "end": 1305, |
| "text": "Camacho-Collados et al., 2016)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Instead, we propose SW2V (Senses and Words to Vectors), a neural model that exploits knowledge from both text corpora and semantic networks in order to simultaneously learn embeddings for both words and senses. Moreover, our model provides three additional key features: (1) both word and sense embeddings are represented in the same vector space, (2) it is flexible, as it can be applied to different predictive models, and (3) it is scalable for very large semantic networks and text corpora.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Embedding words from large corpora into a lowdimensional vector space has been a popular task since the appearance of the probabilistic feedforward neural network language model (Bengio et al., 2003) and later developments such as word2vec (Mikolov et al., 2013) and GloVe (Pennington et al., 2014) . However, little research has focused on exploiting lexical resources to overcome the inherent ambiguity of word embeddings. Iacobacci et al. (2015) overcame this limitation by applying an off-the-shelf disambiguation system (i.e. Babelfy (Moro et al., 2014) ) to a corpus and then using word2vec to learn sense embeddings over the pre-disambiguated text. However, in their approach words are replaced by their intended senses, consequently producing as output sense representations only. The representation of words and senses in the same vector space proves essential for applying these knowledgebased sense embeddings in downstream applications, particularly for their integration into neural architectures (Pilehvar et al., 2017) . In the literature, various different methods have attempted to overcome this limitation. proposed a model for obtaining both word and sense representations based on a first training step of conventional word embeddings, a second disambiguation step based on sense definitions, and a final training phase which uses the disambiguated text as input. Likewise, Rothe and Sch\u00fctze (2015) aimed at building a shared space of word and sense embeddings based on two steps: a first training step of only word embeddings and a second training step to produce sense and synset embeddings. These two approaches require multiple steps of training and make use of a relatively small resource like WordNet, which limits their coverage and applicability. Camacho-Collados et al. (2016) increased the coverage of these WordNetbased approaches by exploiting the complementary knowledge of WordNet and Wikipedia along with pre-trained word embeddings. Finally, and Fang et al. (2016) proposed a model to align vector spaces of words and entities from knowledge bases. However, these approaches are restricted to nominal instances only (i.e. Wikipedia pages or entities).", |
| "cite_spans": [ |
| { |
| "start": 178, |
| "end": 199, |
| "text": "(Bengio et al., 2003)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 240, |
| "end": 262, |
| "text": "(Mikolov et al., 2013)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 273, |
| "end": 298, |
| "text": "(Pennington et al., 2014)", |
| "ref_id": "BIBREF46" |
| }, |
| { |
| "start": 425, |
| "end": 448, |
| "text": "Iacobacci et al. (2015)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 539, |
| "end": 558, |
| "text": "(Moro et al., 2014)", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 1010, |
| "end": 1033, |
| "text": "(Pilehvar et al., 2017)", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 1394, |
| "end": 1418, |
| "text": "Rothe and Sch\u00fctze (2015)", |
| "ref_id": "BIBREF55" |
| }, |
| { |
| "start": 1775, |
| "end": 1805, |
| "text": "Camacho-Collados et al. (2016)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 1982, |
| "end": 2000, |
| "text": "Fang et al. (2016)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In contrast, we propose a model which learns both words and sense embeddings from a single joint training phase, producing a common vector space of words and senses as an emerging feature.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In order to jointly produce embeddings for words and senses, SW2V needs as input a corpus where words are connected to senses 1 in each given context. One option for obtaining such connections could be to take a sense-annotated corpus as input. However, manually annotating large amounts of data is extremely expensive and therefore impractical in normal settings. Obtaining sense-annotated data from current off-the-shelf disambiguation and entity linking systems is possible, but generally suffers from two major problems. First, supervised systems are hampered by the very same problem of needing large amounts of sense-annotated data. Second, the relatively slow speed of current disambiguation systems, such as graph-based approaches (Hoffart et al., 2012; Agirre et al., 2014; Moro et al., 2014) , or word-expert supervised systems (Zhong and Ng, 2010; Iacobacci et al., 2016; Melamud et al., 2016) , could become an obstacle when applied to large corpora. This is the reason why we propose a simple yet effective unsupervised shallow word-sense connectivity algorithm, which can be applied to virtually any given semantic network and is linear on the corpus size. The main idea of the algorithm is to exploit the connections of a semantic network by associating words with the senses that are most connected within the sentence, according to the underlying network.", |
| "cite_spans": [ |
| { |
| "start": 739, |
| "end": 761, |
| "text": "(Hoffart et al., 2012;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 762, |
| "end": 782, |
| "text": "Agirre et al., 2014;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 783, |
| "end": 801, |
| "text": "Moro et al., 2014)", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 838, |
| "end": 858, |
| "text": "(Zhong and Ng, 2010;", |
| "ref_id": "BIBREF66" |
| }, |
| { |
| "start": 859, |
| "end": 882, |
| "text": "Iacobacci et al., 2016;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 883, |
| "end": 904, |
| "text": "Melamud et al., 2016)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Connecting words and senses in context", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Shallow word-sense connectivity algorithm. Formally, a corpus and a semantic network are taken as input and a set of connected words and senses is produced as output. We define a semantic network as a graph (S, E) where the set S contains synsets (nodes) and E represents a set of semantically connected synset pairs (edges). Algorithm 1 describes how to connect words and senses in a given text (sentence or paragraph) T . First, we gather in a set S T all candidate synsets of the words (including multiwords up to trigrams) in T (lines 1 to 3). Second, for each candidate synset s we calculate the number of synsets which are connected with s in the semantic network and are included in S T , excluding connections of synsets which only appear as candidates of the Algorithm 1 Shallow word-sense connectivity Input: Semantic network (S, E) and text T represented as a bag of words Output: Set of connected words and senses T * \u2282 T \u00d7 S 1: Set of synsets ST \u2190 \u2205 2: for each word w \u2208 T 3: ST \u2190 ST \u222a Sw (Sw: set of candidate synsets of w) 4: Minimum connections threshold \u03b8 \u2190 |S T |+|T | 2 \u03b4 5: Output set of connections T * \u2190 \u2205 6: for each w \u2208 T 7:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Connecting words and senses in context", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Relative maximum connections max = 0 8: Set of senses associated with w, Cw \u2190 \u2205 9:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Connecting words and senses in context", |
| "sec_num": "3" |
| }, |
| { |
| "text": "for each candidate synset s \u2208 Sw 10:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Connecting words and senses in context", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Number of edges n = |s \u2208 ST : (s, s ) \u2208 E & \u2203w \u2208 T : w = w & s \u2208 S w | 11: if n \u2265 max & n \u2265 \u03b8 then 12:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Connecting words and senses in context", |
| "sec_num": "3" |
| }, |
| { |
| "text": "if n > max then 13:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Connecting words and senses in context", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Cw \u2190 {(w, s)} 14:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Connecting words and senses in context", |
| "sec_num": "3" |
| }, |
| { |
| "text": "max \u2190 n 15:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Connecting words and senses in context", |
| "sec_num": "3" |
| }, |
| { |
| "text": "else 16: Cw \u2190 Cw \u222a {(w, s)} 17:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Connecting words and senses in context", |
| "sec_num": "3" |
| }, |
| { |
| "text": "T * \u2190 T * \u222a Cw 18: return Output set of connected words and senses T * same word (lines 5 to 10). Finally, each word is associated with its top candidate synset(s) according to its/their number of connections in context, provided that its/their number of connections exceeds a threshold \u03b8", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Connecting words and senses in context", |
| "sec_num": "3" |
| }, |
| { |
| "text": "= |S T |+|T | 2 \u03b4", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Connecting words and senses in context", |
| "sec_num": "3" |
| }, |
| { |
| "text": "(lines 11 to 17). 2 This parameter aims to retain relevant connectivity across senses, as only senses above the threshold will be connected to words in the output corpus. \u03b8 is proportional to the reciprocal of a parameter \u03b4, 3 and directly proportional to the average text length and number of candidate synsets within the text.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Connecting words and senses in context", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The complexity of the proposed algorithm is N + (N \u00d7 \u03b1), where N is the number of words of the training corpus and \u03b1 is the average polysemy degree of a word in the corpus according to the input semantic network. Considering that noncontent words are not taken into account (i.e. polysemy degree 0) and that the average polysemy degree of words in current lexical resources (e.g. WordNet or BabelNet) does not exceed a small constant (3) in any language, we can safely assume that the algorithm is linear in the size of the training corpus. Hence, the training time is not significantly increased in comparison to training words 2 As mentioned above, all unigrams, bigrams and trigrams present in the semantic network are considered. In the case of overlapping instances, the selection of the final instance is performed in this order: mention whose synset is more connected (i.e. n is higher), longer mention and from left to right.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Connecting words and senses in context", |
| "sec_num": "3" |
| }, |
| { |
| "text": "3 Higher values of \u03b4 lead to higher recall, while lower values of \u03b4 increase precision but lower the recall. We set the value of \u03b4 to 100, as it was shown to produce a fine balance between precision and recall. This parameter may also be tuned on downstream tasks. only, irrespective of the corpus size. This enables a fast training on large amounts of text corpora, in contrast to current unsupervised disambiguation algorithms. Additionally, as we will show in Section 5.2, this algorithm does not only speed up significantly the training phase, but also leads to more accurate results.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Connecting words and senses in context", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Note that with our algorithm a word is allowed to have more than one sense associated. In fact, current lexical resources like WordNet (Miller, 1995) or BabelNet (Navigli and Ponzetto, 2012) are hampered by the high granularity of their sense inventories (Hovy et al., 2013) . In Section 6.2 we show how our sense embeddings are particularly suited to deal with this issue.", |
| "cite_spans": [ |
| { |
| "start": 135, |
| "end": 149, |
| "text": "(Miller, 1995)", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 162, |
| "end": 190, |
| "text": "(Navigli and Ponzetto, 2012)", |
| "ref_id": "BIBREF41" |
| }, |
| { |
| "start": 255, |
| "end": 274, |
| "text": "(Hovy et al., 2013)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Connecting words and senses in context", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The goal of our approach is to obtain a shared vector space of words and senses. To this end, our model extends conventional word embedding models by integrating explicit knowledge into its architecture. While we will focus on the Continuous Bag Of Words (CBOW) architecture of word2vec (Mikolov et al., 2013) , our extension can easily be applied similarly to Skip-Gram, or to other predictive approaches based on neural networks. The CBOW architecture is based on the feedforward neural network language model (Bengio et al., 2003) and aims at predicting the current word using its surrounding context. The architecture consists of input, hidden and output layers. The input layer has the size of the word vocabulary and encodes the context as a combination of onehot vector representations of surrounding words of a given target word. The output layer has the same size as the input layer and contains a one-hot vector of the target word during the training phase.", |
| "cite_spans": [ |
| { |
| "start": 287, |
| "end": 309, |
| "text": "(Mikolov et al., 2013)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 512, |
| "end": 533, |
| "text": "(Bengio et al., 2003)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Joint training of words and senses", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Our model extends the input and output layers of the neural network with word senses 4 by exploiting the intrinsic relationship between words and senses. The leading principle is that, since a word is the surface form of an underlying sense, updating the embedding of the word should produce a consequent update to the embedding representing that particular sense, and vice-versa. As a consequence of the algorithm described in the previous section, each word in the corpus may be connected with zero, one or more senses. We re- Figure 1 : The SW2V architecture on a sample training instance using four context words. Dotted lines represent the virtual link between words and associated senses in context. In this example, the input layer consists of a context of two previous words (w t\u22122 , w t\u22121 ) and two subsequent words (w t+1 , w t+2 ) with respect to the target word w t . Two words (w t\u22121 , w t+2 ) do not have senses associated in context, while w t\u22122 , w t+1 have three senses (s 1 t\u22121 , s 2 t\u22121 , s 3 t\u22121 ) and one sense associated (s 1 t+1 ) in context, respectively. The output layer consists of the target word w t , which has two senses associated (s 1 t , s 2 t ) in context.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 529, |
| "end": 537, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Joint training of words and senses", |
| "sec_num": "4" |
| }, |
| { |
| "text": "fer to the set of senses connected to a given word within the specific context as its associated senses.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Joint training of words and senses", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Formally, we define a training instance as a sequence of words W = w t\u2212n , ..., w t , ..., w t+n (being w t the target word) and S", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Joint training of words and senses", |
| "sec_num": "4" |
| }, |
| { |
| "text": "= S t\u2212n , ..., S t , ...., S t+n , where S i = s 1 i , ..., s k i i", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Joint training of words and senses", |
| "sec_num": "4" |
| }, |
| { |
| "text": "is the sequence of all associated senses in context of w i \u2208 W . Note that S i might be empty if the word w i does not have any associated sense. In our model each target word takes as context both its surrounding words and all the senses associated with them. In contrast to the original CBOW architecture, where the training criterion is to correctly classify w t , our approach aims to predict the word w t and its set S t of associated senses. This is equivalent to minimizing the following loss function:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Joint training of words and senses", |
| "sec_num": "4" |
| }, |
| { |
| "text": "E = \u2212 log(p(w t |W t , S t ))\u2212 s\u2208St log(p(s|W t , S t ))", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Joint training of words and senses", |
| "sec_num": "4" |
| }, |
| { |
| "text": "where Figure 1 shows the organization of the input and the output layers on a sample training instance. In what follows we present a set of variants of the model on the output and the input layers.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 6, |
| "end": 14, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Joint training of words and senses", |
| "sec_num": "4" |
| }, |
| { |
| "text": "W t = w t\u2212n , ..., w t\u22121 , w t+1 , ..., w t+n and S t = S t\u2212n , ..., S t\u22121 , S t+1 , ..., S t+n .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Joint training of words and senses", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Both words and senses. This is the default case explained above. If a word has one or more associated senses, these senses are also used as target on a separate output layer.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Output layer alternatives", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Only words. In this case we exclude senses as target. There is a single output layer with the size of the word vocabulary as in the original CBOW model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Output layer alternatives", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Only senses. In contrast, this alternative excludes words, using only senses as target. In this case, if a word does not have any associated sense, it is not used as target instance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Output layer alternatives", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Both words and senses. Words and their associated senses are included in the input layer and contribute to the hidden state. Both words and senses are updated as a consequence of the backpropagation algorithm.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Input layer alternatives", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Only words. In this alternative only the surrounding words contribute to the hidden state, i.e. the target word/sense (depending on the alternative of the output layer) is predicted only from word features. The update of an input word is propagated to the embeddings of its associated senses, if any. In other words, despite not being included in the input layer, senses still receive the same gradient of the associated input word, through a virtual connection. This configuration, coupled with the only-words output layer configuration, corresponds exactly to the default CBOW architecture of word2vec with the only addition of the update step for senses.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Input layer alternatives", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Only senses. Words are excluded from the input layer and the target is predicted only from the senses associated with the surrounding words. The weights of the words are updated through the updates of the associated senses, in contrast to the only-words alternative.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Input layer alternatives", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "In this section we analyze the different components of SW2V, including the nine model configurations (Section 5.1) and the algorithm which generates the connections between words and senses in context (Section 5.2). In what follows we describe the common analysis setting:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis of Model Components", |
| "sec_num": "5" |
| }, |
| { |
| "text": "\u2022 Training model and hyperparameters. For evaluation purposes, we use the CBOW model of word2vec with standard hyperparameters: the dimensionality of the vectors is set to 300 and the window size to 8, and hierarchical softmax is used for normalization. These hyperparameter values are set across all experiments.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis of Model Components", |
| "sec_num": "5" |
| }, |
| { |
| "text": "\u2022 Corpus and semantic network. We use a 300M-words corpus from the UMBC project (Han et al., 2013) , which contains English paragraphs extracted from the web. 5 As semantic network we use BabelNet 3.0 6 , a large multilingual semantic network with over 350 million semantic connections, integrating resources such as Wikipedia and WordNet. We chose BabelNet owing to its wide coverage of named entities and lexicographic knowledge.", |
| "cite_spans": [ |
| { |
| "start": 80, |
| "end": 98, |
| "text": "(Han et al., 2013)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis of Model Components", |
| "sec_num": "5" |
| }, |
| { |
| "text": "\u2022 Benchmark. Word similarity has been one of the most popular benchmarks for in-vitro evaluation of vector space models (Pennington et al., 2014; Levy et al., 2015) . For the analysis we use two word similarity datasets: the similarity portion (Agirre et al., 2009 , WS-Sim) of the WordSim-353 dataset (Finkelstein et al., 2002) and RG-65 (Rubenstein and Goodenough, 1965) . In order to compute the similarity of two words using our sense embeddings, we apply the standard closest senses strategy (Resnik, 1995; Budanitsky and Hirst, 2006; Camacho-Collados et al., 2015) , using cosine similarity (cos) as comparison measure between senses:", |
| "cite_spans": [ |
| { |
| "start": 120, |
| "end": 145, |
| "text": "(Pennington et al., 2014;", |
| "ref_id": "BIBREF46" |
| }, |
| { |
| "start": 146, |
| "end": 164, |
| "text": "Levy et al., 2015)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 244, |
| "end": 264, |
| "text": "(Agirre et al., 2009", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 302, |
| "end": 328, |
| "text": "(Finkelstein et al., 2002)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 333, |
| "end": 372, |
| "text": "RG-65 (Rubenstein and Goodenough, 1965)", |
| "ref_id": null |
| }, |
| { |
| "start": 497, |
| "end": 511, |
| "text": "(Resnik, 1995;", |
| "ref_id": "BIBREF54" |
| }, |
| { |
| "start": 512, |
| "end": 539, |
| "text": "Budanitsky and Hirst, 2006;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 540, |
| "end": 570, |
| "text": "Camacho-Collados et al., 2015)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis of Model Components", |
| "sec_num": "5" |
| }, |
| { |
| "text": "sim(w 1 , w 2 ) = max s\u2208Sw 1 ,s \u2208Sw 2 cos( s 1 , s 2 ) (1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis of Model Components", |
| "sec_num": "5" |
| }, |
| { |
| "text": "where S w i represents the set of all candidate senses of w i and s i refers to the sense vector representation of the sense s i .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis of Model Components", |
| "sec_num": "5" |
| }, |
| { |
| "text": "In this section we analyze the different configurations of our model in respect of the input and the output layer on a word similarity experiment.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model configurations", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Recall from Section 4 that our model could have words, senses or both in either the input and output layers. Table 1 shows the results of all nine configurations on the WS-Sim and RG-65 datasets.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 109, |
| "end": 116, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Model configurations", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "As shown in Table 1 , the best configuration according to both Spearman and Pearson correlation measures is the configuration which has only senses in the input layer and both words and senses in the output layer. 7 In fact, taking only senses as input seems to be consistently the best alternative for the input layer. Our hunch is that the knowledge learned from both the co-occurrence information and the semantic network is more balanced with this input setting. For instance, in the case of including both words and senses in the input layer, the co-occurrence information learned by the network would be duplicated for both words and senses.", |
| "cite_spans": [ |
| { |
| "start": 214, |
| "end": 215, |
| "text": "7", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 12, |
| "end": 19, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Model configurations", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "In this section we evaluate the impact of our shallow word-sense connectivity algorithm (Section 3) by testing our model directly taking a predisambiguated text as input. In this case the network exploits the connections between each word and its disambiguated sense in context. For this comparison we used Babelfy 8 (Moro et al., 2014) , a state-of-the-art graph-based disambiguation and entity linking system based on BabelNet. We compare to both the default Babelfy system which Table 2 : Pearson (r) and Spearman (\u03c1) correlation performance of SW2V integrating our shallow word-sense connectivity algorithm (default), Babelfy, or Babelfy*.", |
| "cite_spans": [ |
| { |
| "start": 317, |
| "end": 336, |
| "text": "(Moro et al., 2014)", |
| "ref_id": "BIBREF37" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 482, |
| "end": 489, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Disambiguation / Shallow word-sense connectivity algorithm", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "uses the Most Common Sense (MCS) heuristic as a back-off strategy and, following (Iacobacci et al., 2015) , we also include a version in which only instances above the Babelfy default confidence threshold are disambiguated (i.e. the MCS backoff strategy is disabled). We will refer to this latter version as Babelfy* and report the best configuration of each strategy according to our analysis. Table 2 shows the results of our model using the three different strategies on RG-65 and WS-Sim. Our shallow word-sense connectivity algorithm achieves the best overall results. We believe that these results are due to the semantic connectivity ensured by our algorithm and to the possibility of associating words with more than one sense, which seems beneficial for training, making it more robust to possible disambiguation errors and to the sense granularity issue (Erk et al., 2013) . The results are especially significant considering that our algorithm took a tenth of the time needed by Babelfy to process the corpus.", |
| "cite_spans": [ |
| { |
| "start": 81, |
| "end": 105, |
| "text": "(Iacobacci et al., 2015)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 863, |
| "end": 881, |
| "text": "(Erk et al., 2013)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 395, |
| "end": 402, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Disambiguation / Shallow word-sense connectivity algorithm", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "We perform a qualitative and quantitative evaluation of important features of SW2V in three different tasks. First, in order to compare our model against standard word-based approaches, we evaluate our system in the word similarity task (Section 6.1). Second, we measure the quality of our sense embeddings in a sense-specific application: sense clustering (Section 6.2). Finally, we evaluate the coherence of our unified vector space by measuring the interconnectivity of word and sense embeddings (Section 6.3).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Experimental setting. Throughout all the experiments we use the same standard hyperparameters mentioned in Section 5 for both the original word2vec implementation and our proposed model SW2V. For SW2V we use the same optimal configuration according to the analysis of the previous section (only senses as input, and both words and senses as output) for all tasks. As training corpus we take the full 3B-words UMBC webbase corpus and the Wikipedia (Wikipedia dump of November 2014), used by three of the comparison systems. We use BabelNet 3.0 (SW2V BN ) and WordNet 3.0 (SW2V WN ) as semantic networks.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Comparison systems. We compare with the publicly available pre-trained sense embeddings of four state-of-the-art models: Chen et al. (2014) 9 and AutoExtend 10 (Rothe and Sch\u00fctze, 2015) based on WordNet, and SensEmbed 11 (Iacobacci et al., 2015) and NASARI 12 (Camacho-Collados et al., 2016) based on BabelNet.", |
| "cite_spans": [ |
| { |
| "start": 221, |
| "end": 245, |
| "text": "(Iacobacci et al., 2015)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "6" |
| }, |
| { |
| "text": "In this section we evaluate our sense representations on the standard SimLex-999 (Hill et al., 2015) and MEN (Bruni et al., 2014) word similarity datasets 13 . SimLex and MEN contain 999 and 3000 word pairs, respectively, which constitute, to our knowledge, the two largest similar-9 http://pan.baidu.com/s/1eQcPK8i 10 We used the AutoExtend code (http://cistern. cis.lmu.de/\u02dcsascha/AutoExtend/) to obtain sense vectors using W2V embeddings trained on UMBC (GoogleNews corpus used in their pre-trained models is not publicly available). We also tried the code to include BabelNet as lexical resource, but it was not easily scalable (BabelNet is two orders of magnitude larger than WordNet).", |
| "cite_spans": [ |
| { |
| "start": 81, |
| "end": 100, |
| "text": "(Hill et al., 2015)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 109, |
| "end": 129, |
| "text": "(Bruni et al., 2014)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Similarity", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "11 http://lcl.uniroma1.it/sensembed/ 12 http://lcl.uniroma1.it/nasari/ 13 To enable a fair comparison we did not perform experiments on the small datasets used in Section 5 for validation. Table 3 : Pearson (r) and Spearman (\u03c1) correlation performance on the SimLex-999 and MEN word similarity datasets.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 189, |
| "end": 196, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Word Similarity", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "ity datasets comprising a balanced set of noun, verb and adjective instances. As explained in Section 5, we use the closest sense strategy for the word similarity measurement of our model and all sense-based comparison systems. As regards the word embedding models, words are directly compared by using cosine similarity. We also include a retrofitted version of the original word2vec word vectors (Faruqui et al., 2015, Retrofitting 14 ) using WordNet (Retrofitting WN ) and BabelNet (Retrofitting BN ) as lexical resources. Table 3 shows the results of SW2V and all comparison models in SimLex and MEN. SW2V consistently outperforms all sense-based comparison systems using the same corpus, and clearly performs better than the original word2vec trained on the same corpus. Retrofitting decreases the performance of the original word2vec on the Wikipedia corpus using BabelNet as lexical resource, but significantly improves the original word vectors on the UMBC corpus, obtaining comparable results to our approach. However, while our approach provides a shared space of words and senses, Retrofitting still conflates different meanings of a word into the same vector.", |
| "cite_spans": [ |
| { |
| "start": 398, |
| "end": 438, |
| "text": "(Faruqui et al., 2015, Retrofitting 14 )", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 526, |
| "end": 533, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Word Similarity", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "Additionally, we noticed that most of the score divergences between our system and the gold standard scores in SimLex-999 were produced on antonym pairs, which are over-represented in this dataset: 38 word pairs hold a clear antonymy relation (e.g. encourage-discourage or long-short), while 41 additional pairs hold some degree of antonymy (e.g. new-ancient or man-woman). 15 In contrast to the consistently low gold similarity scores given to antonym pairs, our system varies its similarity scores depending on the specific nature of the pair 16 . Recent works have managed to obtain significant improvements by tweaking usual word embedding approaches into providing low similarity scores for antonym pairs (Pham et al., 2015; Schwartz et al., 2015; Nguyen et al., 2016; Mrksic et al., 2017) , but this is outside the scope of this paper.", |
| "cite_spans": [ |
| { |
| "start": 710, |
| "end": 729, |
| "text": "(Pham et al., 2015;", |
| "ref_id": "BIBREF47" |
| }, |
| { |
| "start": 730, |
| "end": 752, |
| "text": "Schwartz et al., 2015;", |
| "ref_id": "BIBREF58" |
| }, |
| { |
| "start": 753, |
| "end": 773, |
| "text": "Nguyen et al., 2016;", |
| "ref_id": "BIBREF43" |
| }, |
| { |
| "start": 774, |
| "end": 794, |
| "text": "Mrksic et al., 2017)", |
| "ref_id": "BIBREF38" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Similarity", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "Current lexical resources tend to suffer from the high granularity of their sense inventories . In fact, a meaningful clustering of their senses may lead to improvements on downstream tasks (Hovy et al., 2013; Flekova and Gurevych, 2016; Pilehvar et al., 2017) . In this section we evaluate our synset representations on the Wikipedia sense clustering task. For a fair comparison with respect to the BabelNet-based com- parison systems that use the Wikipedia corpus for training, in this experiment we report the results of our model trained on the Wikipedia corpus and using BabelNet as lexical resource only. For the evaluation we consider the two Wikipedia sense clustering datasets (500-pair and SemEval) created by Dandala et al. (2013) . In these datasets sense clustering is viewed as a binary classification task in which, given a pair of Wikipedia pages, the system has to decide whether to cluster them into a single instance or not. To this end, we use our synset embeddings and cluster Wikipedia pages 17 together if their similarity exceeds a threshold \u03b3. In order to set the optimal value of \u03b3, we follow Dandala et al. (2013) and use the first 500-pairs sense clustering dataset for tuning. We set the threshold \u03b3 to 0.35, which is the value leading to the highest F-Measure among all values from 0 to 1 with a 0.05 step size on the 500-pair dataset. Likewise, we set a threshold for NASARI (0.7) and SensEmbed (0.3) comparison systems. Finally, we evaluate our approach on the Se-mEval sense clustering test set. This test set consists of 925 pairs which were obtained from a set of highly ambiguous words gathered from past SemEval tasks. For comparison, we also include the supervised approach of Dandala et al. (2013) based on a multi-feature Support Vector Machine classifier trained on an automaticallylabeled dataset of the English Wikipedia (Mono-SVM) and Wikipedia in four different languages (Multi-SVM). 
As naive baseline we include the system which would cluster all given pairs. Table 4 shows the F-Measure and accuracy results on the SemEval sense clustering dataset. SW2V outperforms all comparison systems according to both measures, including the sense rep-resentations of NASARI and SensEmbed using the same setup and the same underlying lexical resource. This confirms the capability of our system to accurately capture the semantics of word senses on this sense-specific task.", |
| "cite_spans": [ |
| { |
| "start": 190, |
| "end": 209, |
| "text": "(Hovy et al., 2013;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 210, |
| "end": 237, |
| "text": "Flekova and Gurevych, 2016;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 238, |
| "end": 260, |
| "text": "Pilehvar et al., 2017)", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 720, |
| "end": 741, |
| "text": "Dandala et al. (2013)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 1119, |
| "end": 1140, |
| "text": "Dandala et al. (2013)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 1715, |
| "end": 1736, |
| "text": "Dandala et al. (2013)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 2007, |
| "end": 2014, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Sense Clustering", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "In the previous experiments we evaluated the effectiveness of the sense embeddings. In contrast, this experiment aims at testing the interconnectivity between word and sense embeddings in the vector space. As explained in Section 2, there have been previous approaches building a shared space of word and sense embeddings, but to date little research has focused on testing the semantic coherence of the vector space. To this end, we evaluate our model on a Word Sense Disambiguation (WSD) task, using our shared vector space of words and senses to obtain a Most Common Sense (MCS) baseline. The insight behind this experiment is that a semantically coherent shared space of words and senses should be able to build a relatively strong baseline for the task, as the MCS of a given word should be closer to the word vector than any other sense. The MCS baseline is generally integrated into the pipeline of stateof-the-art WSD and Entity Linking systems as a back-off strategy (Navigli, 2009; Jin et al., 2009; Zhong and Ng, 2010; Moro et al., 2014; Raganato et al., 2017) and is used in various NLP applications (Bennett et al., 2016) . Therefore, a system which automatically identifies the MCS of words from non-annotated text may be quite valuable, especially for resource-poor languages or large knowledge resources for which obtaining senseannotated corpora is extremely expensive. Moreover, even in a resource like WordNet for which sense-annotated data is available (Miller et al., 1993, SemCor) , 61% of its polysemous lemmas have no sense annotations (Bennett et al., 2016) .", |
| "cite_spans": [ |
| { |
| "start": 976, |
| "end": 991, |
| "text": "(Navigli, 2009;", |
| "ref_id": "BIBREF39" |
| }, |
| { |
| "start": 992, |
| "end": 1009, |
| "text": "Jin et al., 2009;", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 1010, |
| "end": 1029, |
| "text": "Zhong and Ng, 2010;", |
| "ref_id": "BIBREF66" |
| }, |
| { |
| "start": 1030, |
| "end": 1048, |
| "text": "Moro et al., 2014;", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 1049, |
| "end": 1071, |
| "text": "Raganato et al., 2017)", |
| "ref_id": "BIBREF52" |
| }, |
| { |
| "start": 1112, |
| "end": 1134, |
| "text": "(Bennett et al., 2016)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 1473, |
| "end": 1502, |
| "text": "(Miller et al., 1993, SemCor)", |
| "ref_id": null |
| }, |
| { |
| "start": 1560, |
| "end": 1582, |
| "text": "(Bennett et al., 2016)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word and sense interconnectivity", |
| "sec_num": "6.3" |
| }, |
| { |
| "text": "Given an input word w, we compute the cosine similarity between w and all its candidate senses, picking the sense leading to the highest similarity:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word and sense interconnectivity", |
| "sec_num": "6.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "M CS(w) = argmax s\u2208Sw cos( w, s)", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Word and sense interconnectivity", |
| "sec_num": "6.3" |
| }, |
| { |
| "text": "where cos( w, s) refers to the cosine similarity between the embeddings of w and s. In order to assess the reliability of SW2V against previous models using WordNet as sense inventory, we test our model on the all-words SemEval-2007 (task 17) (Pradhan et al., 2007) and SemEval-2013 (task 12) WSD datasets. Note that our model using BabelNet as semantic network has a far larger coverage than just WordNet and may additionally be used for Wikification (Mihalcea and Csomai, 2007) and Entity Linking tasks.", |
| "cite_spans": [ |
| { |
| "start": 243, |
| "end": 265, |
| "text": "(Pradhan et al., 2007)", |
| "ref_id": "BIBREF50" |
| }, |
| { |
| "start": 452, |
| "end": 479, |
| "text": "(Mihalcea and Csomai, 2007)", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word and sense interconnectivity", |
| "sec_num": "6.3" |
| }, |
| { |
| "text": "Since the versions of WordNet vary across datasets and comparison systems, we decided to evaluate the systems on the portion of the datasets covered by all comparison systems 18 (less than 10% of instances were removed from each dataset). Table 5 shows the results of our system and AutoExtend on the SemEval-2007 and SemEval-2013 WSD datasets. SW2V provides the best MCS results in both datasets. In general, AutoExtend does not accurately capture the predominant sense of a word and performs worse than a baseline that selects the intended sense randomly from the set of all possible senses of the target word.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 239, |
| "end": 246, |
| "text": "Table 5", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Word and sense interconnectivity", |
| "sec_num": "6.3" |
| }, |
| { |
| "text": "In fact, AutoExtend tends to create clusters which include a word and all its possible senses. As an example, Table 6 shows the closest word and sense 19 embeddings of our SW2V model and Au-toExtend to the military and fish senses of, respectively, company and school. AutoExtend creates clusters with all the senses of company and school and their related instances, even if they belong to different domains (e.g., firm 2 n or business 1 n clearly concern the business sense of company). Instead, SW2V creates a semantic cluster of word and sense embeddings which are semantically close to the corresponding company 2 n and school 7 n senses.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 110, |
| "end": 117, |
| "text": "Table 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Word and sense interconnectivity", |
| "sec_num": "6.3" |
| }, |
| { |
| "text": "In this paper we proposed SW2V (Senses and Words to Vectors), a neural model which learns vector representations for words and senses in a joint training phase by exploiting both text corpora and knowledge from semantic networks. Data (in- Table 6 : Ten closest word and sense embeddings to the senses company 2 n (military unit) and school 7 n (group of fish).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 240, |
| "end": 247, |
| "text": "Table 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "7" |
| }, |
| { |
| "text": "cluding the preprocessed corpora and pre-trained embeddings used in the evaluation) and source code to apply our extension of the word2vec architecture to learn word and sense embeddings from any preprocessed corpus are freely available at http://lcl.uniroma1.it/sw2v. Unlike previous sense-based models which require post-processing steps and use WordNet as sense inventory, our model achieves a semantically coherent vector space of both words and senses as an emerging feature of a single training phase and is easily scalable to larger semantic networks like BabelNet. Finally, we showed, both quantitatively and qualitatively, some of the advantages of using our approach as against previous state-ofthe-art word-and sense-based models in various tasks, and highlighted interesting semantic properties of the resulting unified vector space of word and sense embeddings. As future work we plan to integrate a WSD and Entity Linking system for applying our model on downstream NLP applications, along the lines of Pilehvar et al. (2017) . We are also planning to apply our model to languages other than English and to study its potential on multilingual and crosslingual applications.", |
| "cite_spans": [ |
| { |
| "start": 1017, |
| "end": 1039, |
| "text": "Pilehvar et al. (2017)", |
| "ref_id": "BIBREF48" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "7" |
| }, |
| { |
| "text": "In this paper we focus on senses but other items connected to words may be used (e.g. supersenses or images).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Our model can also produce a space of words and synset embeddings as output: the only difference is that all synonym senses would be considered to be the same item, i.e. a synset.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "http://ebiquity.umbc. edu/blogger/2013/05/01/ umbc-webbase-corpus-of-3b-english-words/ 6 http://babelnet.org", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "In this analysis we used the word similarity task for optimizing the sense embeddings, without caring about the performance of word embeddings or their interconnectivity. Therefore, this configuration may not be optimal for word embeddings and may be further tuned on specific applications. More information about different configurations in the documentation of the source code.8 http://babelfy.org", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://github.com/mfaruqui/ retrofitting", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Two annotators decided the degree of antonymy between word pairs: clear antonyms, weak antonyms or neither.16 For instance, the pairs sunset-sunrise and day-night are given, respectively, 1.88 and 2.47 gold scores in the 0-10 scale, while our model gives them a higher similarity score. In fact, both pairs appear as coordinate synsets in WordNet.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Since Wikipedia is a resource included in BabelNet, our synset representations are expandable to Wikipedia pages.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We were unable to obtain the word embeddings of for comparison even after contacting the authors.19 FollowingNavigli (2009), word p n is the n th sense of word with part of speech p (using WordNet 3.0).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "The authors gratefully acknowledge the support of the ERC Consolidator Grant MOUSSE No. 726487.Jose Camacho-Collados is supported by a Google Doctoral Fellowship in Natural Language Processing. We would also like to thank Jim Mc-Manus for his comments on the manuscript.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "A study on similarity and relatedness using distributional and WordNet-based approaches", |
| "authors": [ |
| { |
| "first": "Eneko", |
| "middle": [], |
| "last": "Agirre", |
| "suffix": "" |
| }, |
| { |
| "first": "Enrique", |
| "middle": [], |
| "last": "Alfonseca", |
| "suffix": "" |
| }, |
| { |
| "first": "Keith", |
| "middle": [], |
| "last": "Hall", |
| "suffix": "" |
| }, |
| { |
| "first": "Jana", |
| "middle": [], |
| "last": "Kravalova", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "19--27", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eneko Agirre, Enrique Alfonseca, Keith Hall, Jana Kravalova, Marius Pa\u015fca, and Aitor Soroa. 2009. A study on similarity and relatedness using distribu- tional and WordNet-based approaches. In Proceed- ings of NAACL. pages 19-27.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Random walks for knowledge-based word sense disambiguation", |
| "authors": [ |
| { |
| "first": "Eneko", |
| "middle": [], |
| "last": "Agirre", |
| "suffix": "" |
| }, |
| { |
| "first": "Oier", |
| "middle": [], |
| "last": "Lopez De Lacalle", |
| "suffix": "" |
| }, |
| { |
| "first": "Aitor", |
| "middle": [], |
| "last": "Soroa", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Computational Linguistics", |
| "volume": "40", |
| "issue": "1", |
| "pages": "57--84", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eneko Agirre, Oier Lopez de Lacalle, and Aitor Soroa. 2014. Random walks for knowledge-based word sense disambiguation. Computational Linguistics 40(1):57-84.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "A Neural Probabilistic Language Model", |
| "authors": [ |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "R\u00e9jean", |
| "middle": [], |
| "last": "Ducharme", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascal", |
| "middle": [], |
| "last": "Vincent", |
| "suffix": "" |
| }, |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Janvin", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "The Journal of Machine Learning Research", |
| "volume": "3", |
| "issue": "", |
| "pages": "1137--1155", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yoshua Bengio, R\u00e9jean Ducharme, Pascal Vincent, and Christian Janvin. 2003. A Neural Probabilistic Lan- guage Model. The Journal of Machine Learning Re- search 3:1137-1155.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Lexsemtm: A semantic dataset based on all-words unsupervised sense distribution learning", |
| "authors": [ |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Bennett", |
| "suffix": "" |
| }, |
| { |
| "first": "Timothy", |
| "middle": [], |
| "last": "Baldwin", |
| "suffix": "" |
| }, |
| { |
| "first": "Jey", |
| "middle": [ |
| "Han" |
| ], |
| "last": "Lau", |
| "suffix": "" |
| }, |
| { |
| "first": "Diana", |
| "middle": [], |
| "last": "Mccarthy", |
| "suffix": "" |
| }, |
| { |
| "first": "Francis", |
| "middle": [], |
| "last": "Bond", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "1513--1524", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrew Bennett, Timothy Baldwin, Jey Han Lau, Di- ana McCarthy, and Francis Bond. 2016. Lexsemtm: A semantic dataset based on all-words unsupervised sense distribution learning. In Proceedings of ACL. pages 1513-1524.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Question answering with subgraph embeddings", |
| "authors": [ |
| { |
| "first": "Antoine", |
| "middle": [], |
| "last": "Bordes", |
| "suffix": "" |
| }, |
| { |
| "first": "Sumit", |
| "middle": [], |
| "last": "Chopra", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "615--620", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Antoine Bordes, Sumit Chopra, and Jason Weston. 2014. Question answering with subgraph embed- dings. In Proceedings of EMNLP. pages 615-620.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Multimodal distributional semantics", |
| "authors": [ |
| { |
| "first": "Elia", |
| "middle": [], |
| "last": "Bruni", |
| "suffix": "" |
| }, |
| { |
| "first": "Nam-Khanh", |
| "middle": [], |
| "last": "Tran", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Baroni", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "J. Artif. Intell. Res.(JAIR)", |
| "volume": "49", |
| "issue": "", |
| "pages": "1--47", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Elia Bruni, Nam-Khanh Tran, and Marco Baroni. 2014. Multimodal distributional semantics. J. Artif. Intell. Res.(JAIR) 49(1-47).", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Evaluating WordNet-based measures of Lexical Semantic Relatedness", |
| "authors": [ |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Budanitsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Graeme", |
| "middle": [], |
| "last": "Hirst", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Computational Linguistics", |
| "volume": "32", |
| "issue": "1", |
| "pages": "13--47", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexander Budanitsky and Graeme Hirst. 2006. Evalu- ating WordNet-based measures of Lexical Semantic Relatedness. Computational Linguistics 32(1):13- 47.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "A Unified Multilingual Semantic Representation of Concepts", |
| "authors": [ |
| { |
| "first": "Jos\u00e9", |
| "middle": [], |
| "last": "Camacho-Collados", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohammad", |
| "middle": [], |
| "last": "Taher Pilehvar", |
| "suffix": "" |
| }, |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "741--751", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jos\u00e9 Camacho-Collados, Mohammad Taher Pilehvar, and Roberto Navigli. 2015. A Unified Multilingual Semantic Representation of Concepts. In Proceed- ings of ACL. Beijing, China, pages 741-751.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Nasari: Integrating explicit knowledge and corpus statistics for a multilingual representation of concepts and entities", |
| "authors": [ |
| { |
| "first": "Jos\u00e9", |
| "middle": [], |
| "last": "Camacho-Collados", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohammad", |
| "middle": [], |
| "last": "Taher Pilehvar", |
| "suffix": "" |
| }, |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Artificial Intelligence", |
| "volume": "240", |
| "issue": "", |
| "pages": "36--64", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jos\u00e9 Camacho-Collados, Mohammad Taher Pilehvar, and Roberto Navigli. 2016. Nasari: Integrating ex- plicit knowledge and corpus statistics for a multilin- gual representation of concepts and entities. Artifi- cial Intelligence 240:36-64.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "A unified model for word sense representation and disambiguation", |
| "authors": [ |
| { |
| "first": "Xinxiong", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhiyuan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Maosong", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "1025--1035", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xinxiong Chen, Zhiyuan Liu, and Maosong Sun. 2014. A unified model for word sense representation and disambiguation. In Proceedings of EMNLP. Doha, Qatar, pages 1025-1035.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Sense clustering using Wikipedia", |
| "authors": [ |
| { |
| "first": "Bharath", |
| "middle": [], |
| "last": "Dandala", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Hokamp", |
| "suffix": "" |
| }, |
| { |
| "first": "Rada", |
| "middle": [], |
| "last": "Mihalcea", |
| "suffix": "" |
| }, |
| { |
| "first": "Razvan", |
| "middle": [ |
| "C" |
| ], |
| "last": "Bunescu", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proc. of RANLP. Hissar", |
| "volume": "", |
| "issue": "", |
| "pages": "164--171", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bharath Dandala, Chris Hokamp, Rada Mihalcea, and Razvan C. Bunescu. 2013. Sense clustering using Wikipedia. In Proc. of RANLP. Hissar, Bulgaria, pages 164-171.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Clustering and diversifying web search results with graphbased word sense induction", |
| "authors": [ |
| { |
| "first": "Antonio", |
| "middle": [], |
| "last": "Di Marco", |
| "suffix": "" |
| }, |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Computational Linguistics", |
| "volume": "39", |
| "issue": "3", |
| "pages": "709--754", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Antonio Di Marco and Roberto Navigli. 2013. Cluster- ing and diversifying web search results with graph- based word sense induction. Computational Lin- guistics 39(3):709-754.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Measuring word meaning in context", |
| "authors": [ |
| { |
| "first": "Katrin", |
| "middle": [], |
| "last": "Erk", |
| "suffix": "" |
| }, |
| { |
| "first": "Diana", |
| "middle": [], |
| "last": "Mccarthy", |
| "suffix": "" |
| }, |
| { |
| "first": "Nicholas", |
| "middle": [], |
| "last": "Gaylord", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Computational Linguistics", |
| "volume": "39", |
| "issue": "3", |
| "pages": "511--554", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Katrin Erk, Diana McCarthy, and Nicholas Gaylord. 2013. Measuring word meaning in context. Com- putational Linguistics 39(3):511-554.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Retrofitting Sense-Specific Word Vectors Using Parallel Text", |
| "authors": [ |
| { |
| "first": "Allyson", |
| "middle": [], |
| "last": "Ettinger", |
| "suffix": "" |
| }, |
| { |
| "first": "Philip", |
| "middle": [], |
| "last": "Resnik", |
| "suffix": "" |
| }, |
| { |
| "first": "Marine", |
| "middle": [], |
| "last": "Carpuat", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of NAACL-HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "1378--1383", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Allyson Ettinger, Philip Resnik, and Marine Carpuat. 2016. Retrofitting Sense-Specific Word Vectors Us- ing Parallel Text. In Proceedings of NAACL-HLT. pages 1378-1383.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Entity disambiguation by knowledge and text jointly embedding", |
| "authors": [ |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Fang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianwen", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Dilin", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zheng", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of CoNLL", |
| "volume": "", |
| "issue": "", |
| "pages": "260--269", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wei Fang, Jianwen Zhang, Dilin Wang, Zheng Chen, and Ming Li. 2016. Entity disambiguation by knowledge and text jointly embedding. In Proceed- ings of CoNLL. pages 260-269.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Retrofitting word vectors to semantic lexicons", |
| "authors": [ |
| { |
| "first": "Manaal", |
| "middle": [], |
| "last": "Faruqui", |
| "suffix": "" |
| }, |
| { |
| "first": "Jesse", |
| "middle": [], |
| "last": "Dodge", |
| "suffix": "" |
| }, |
| { |
| "first": "Sujay", |
| "middle": [ |
| "K" |
| ], |
| "last": "Jauhar", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Eduard", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "1606--1615", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Manaal Faruqui, Jesse Dodge, Sujay K. Jauhar, Chris Dyer, Eduard Hovy, and Noah A. Smith. 2015. Retrofitting word vectors to semantic lexicons. In Proceedings of NAACL. pages 1606-1615.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Placing search in context: The concept revisited", |
| "authors": [ |
| { |
| "first": "Lev", |
| "middle": [], |
| "last": "Finkelstein", |
| "suffix": "" |
| }, |
| { |
| "first": "Evgeniy", |
| "middle": [], |
| "last": "Gabrilovich", |
| "suffix": "" |
| }, |
| { |
| "first": "Yossi", |
| "middle": [], |
| "last": "Matias", |
| "suffix": "" |
| }, |
| { |
| "first": "Ehud", |
| "middle": [], |
| "last": "Rivlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Zach", |
| "middle": [], |
| "last": "Solan", |
| "suffix": "" |
| }, |
| { |
| "first": "Gadi", |
| "middle": [], |
| "last": "Wolfman", |
| "suffix": "" |
| }, |
| { |
| "first": "Eytan", |
| "middle": [], |
| "last": "Ruppin", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "ACM Transactions on Information Systems", |
| "volume": "20", |
| "issue": "1", |
| "pages": "116--131", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lev Finkelstein, Gabrilovich Evgeniy, Matias Yossi, Rivlin Ehud, Solan Zach, Wolfman Gadi, and Rup- pin Eytan. 2002. Placing search in context: The con- cept revisited. ACM Transactions on Information Systems 20(1):116-131.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Supersense embeddings: A unified model for supersense interpretation, prediction, and utilization", |
| "authors": [ |
| { |
| "first": "Lucie", |
| "middle": [], |
| "last": "Flekova", |
| "suffix": "" |
| }, |
| { |
| "first": "Iryna", |
| "middle": [], |
| "last": "Gurevych", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "2029--2041", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lucie Flekova and Iryna Gurevych. 2016. Supersense embeddings: A unified model for supersense inter- pretation, prediction, and utilization. In Proceedings of ACL. pages 2029-2041.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Random walks and neural network language models on knowledge bases", |
| "authors": [ |
| { |
| "first": "Josu", |
| "middle": [], |
| "last": "Goikoetxea", |
| "suffix": "" |
| }, |
| { |
| "first": "Aitor", |
| "middle": [], |
| "last": "Soroa", |
| "suffix": "" |
| }, |
| { |
| "first": "Eneko", |
| "middle": [], |
| "last": "Agirre", |
| "suffix": "" |
| }, |
| { |
| "first": "Basque Country", |
| "middle": [], |
| "last": "Donostia", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "1434--1439", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Josu Goikoetxea, Aitor Soroa, Eneko Agirre, and Basque Country Donostia. 2015. Random walks and neural network language models on knowledge bases. In Proceedings of NAACL. pages 1434-1439.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Learning sense-specific word embeddings by exploiting bilingual resources", |
| "authors": [ |
| { |
| "first": "Jiang", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| }, |
| { |
| "first": "Wanxiang", |
| "middle": [], |
| "last": "Che", |
| "suffix": "" |
| }, |
| { |
| "first": "Haifeng", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Ting", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of COLING", |
| "volume": "", |
| "issue": "", |
| "pages": "497--507", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiang Guo, Wanxiang Che, Haifeng Wang, and Ting Liu. 2014. Learning sense-specific word embed- dings by exploiting bilingual resources. In Proceed- ings of COLING. pages 497-507.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "UMBC EBIQUITY-CORE: Semantic textual similarity systems", |
| "authors": [ |
| { |
| "first": "Lushan", |
| "middle": [], |
| "last": "Han", |
| "suffix": "" |
| }, |
| { |
| "first": "Abhay", |
| "middle": [], |
| "last": "Kashyap", |
| "suffix": "" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Finin", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Mayfield", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "Weese", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the Second Joint Conference on Lexical and Computational Semantics", |
| "volume": "1", |
| "issue": "", |
| "pages": "44--52", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lushan Han, Abhay Kashyap, Tim Finin, James Mayfield, and Jonathan Weese. 2013. UMBC EBIQUITY-CORE: Semantic textual similarity sys- tems. In Proceedings of the Second Joint Confer- ence on Lexical and Computational Semantics. vol- ume 1, pages 44-52.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Simlex-999: Evaluating semantic models with (genuine) similarity estimation", |
| "authors": [ |
| { |
| "first": "Felix", |
| "middle": [], |
| "last": "Hill", |
| "suffix": "" |
| }, |
| { |
| "first": "Roi", |
| "middle": [], |
| "last": "Reichart", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Felix Hill, Roi Reichart, and Anna Korhonen. 2015. Simlex-999: Evaluating semantic models with (gen- uine) similarity estimation. Computational Linguis- tics .", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Kore: keyphrase overlap relatedness for entity disambiguation", |
| "authors": [ |
| { |
| "first": "Johannes", |
| "middle": [], |
| "last": "Hoffart", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephan", |
| "middle": [], |
| "last": "Seufert", |
| "suffix": "" |
| }, |
| { |
| "first": "Dat", |
| "middle": [], |
| "last": "Ba Nguyen", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Theobald", |
| "suffix": "" |
| }, |
| { |
| "first": "Gerhard", |
| "middle": [], |
| "last": "Weikum", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of CIKM", |
| "volume": "", |
| "issue": "", |
| "pages": "545--554", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Johannes Hoffart, Stephan Seufert, Dat Ba Nguyen, Martin Theobald, and Gerhard Weikum. 2012. Kore: keyphrase overlap relatedness for entity dis- ambiguation. In Proceedings of CIKM. pages 545- 554.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Collaboratively built semistructured content and Artificial Intelligence: The story so far", |
| "authors": [ |
| { |
| "first": "Eduard", |
| "middle": [ |
| "H" |
| ], |
| "last": "Hovy", |
| "suffix": "" |
| }, |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| }, |
| { |
| "first": "Simone", |
| "middle": [ |
| "Paolo" |
| ], |
| "last": "Ponzetto", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Artificial Intelligence", |
| "volume": "194", |
| "issue": "", |
| "pages": "2--27", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eduard H. Hovy, Roberto Navigli, and Simone Paolo Ponzetto. 2013. Collaboratively built semi- structured content and Artificial Intelligence: The story so far. Artificial Intelligence 194:2-27.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Improving word representations via global context and multiple word prototypes", |
| "authors": [ |
| { |
| "first": "Eric", |
| "middle": [ |
| "H" |
| ], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proc. of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "873--882", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eric H. Huang, Richard Socher, Christopher D. Man- ning, and Andrew Y. Ng. 2012. Improving word representations via global context and multiple word prototypes. In Proc. of ACL. Jeju Island, Korea, pages 873-882.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Sensembed: Learning sense embeddings for word and relational similarity", |
| "authors": [ |
| { |
| "first": "Ignacio", |
| "middle": [], |
| "last": "Iacobacci", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohammad", |
| "middle": [], |
| "last": "Taher Pilehvar", |
| "suffix": "" |
| }, |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "95--105", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ignacio Iacobacci, Mohammad Taher Pilehvar, and Roberto Navigli. 2015. Sensembed: Learning sense embeddings for word and relational similarity. In Proceedings of ACL. Beijing, China, pages 95-105.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Embeddings for Word Sense Disambiguation: An Evaluation Study", |
| "authors": [ |
| { |
| "first": "Ignacio", |
| "middle": [], |
| "last": "Iacobacci", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohammad", |
| "middle": [], |
| "last": "Taher Pilehvar", |
| "suffix": "" |
| }, |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "897--907", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ignacio Iacobacci, Mohammad Taher Pilehvar, and Roberto Navigli. 2016. Embeddings for Word Sense Disambiguation: An Evaluation Study. In Proceed- ings of ACL. pages 897-907.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Ontologically grounded multi-sense representation learning for semantic vector space models", |
| "authors": [ |
| { |
| "first": "Sujay", |
| "middle": [ |
| "Kumar" |
| ], |
| "last": "Jauhar", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Eduard", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sujay Kumar Jauhar, Chris Dyer, and Eduard Hovy. 2015. Ontologically grounded multi-sense represen- tation learning for semantic vector space models. In Proceedings of NAACL.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Estimating and exploiting the entropy of sense distributions", |
| "authors": [ |
| { |
| "first": "Peng", |
| "middle": [], |
| "last": "Jin", |
| "suffix": "" |
| }, |
| { |
| "first": "Diana", |
| "middle": [], |
| "last": "McCarthy", |
| "suffix": "" |
| }, |
| { |
| "first": "Rob", |
| "middle": [], |
| "last": "Koeling", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Carroll", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "233--236", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peng Jin, Diana McCarthy, Rob Koeling, and John Car- roll. 2009. Estimating and exploiting the entropy of sense distributions. In Proceedings of NAACL (2). pages 233-236.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Embedding a semantic network in a word space", |
| "authors": [ |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Johansson", |
| "suffix": "" |
| }, |
| { |
| "first": "Luis Nieto", |
| "middle": [], |
| "last": "Pina", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "1428--1433", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Richard Johansson and Luis Nieto Pina. 2015. Embed- ding a semantic network in a word space. In Pro- ceedings of NAACL. pages 1428-1433.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Improving distributional similarity with lessons learned from word embeddings", |
| "authors": [ |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Goldberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Ido", |
| "middle": [], |
| "last": "Dagan", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "TACL", |
| "volume": "3", |
| "issue": "", |
| "pages": "211--225", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Omer Levy, Yoav Goldberg, and Ido Dagan. 2015. Im- proving distributional similarity with lessons learned from word embeddings. TACL 3:211-225.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Do multi-sense embeddings improve natural language understanding?", |
| "authors": [ |
| { |
| "first": "Jiwei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiwei Li and Dan Jurafsky. 2015. Do multi-sense em- beddings improve natural language understanding? In Proceedings of EMNLP. Lisbon, Portugal.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Learning Generic Context Embedding with Bidirectional LSTM", |
| "authors": [ |
| { |
| "first": "Oren", |
| "middle": [], |
| "last": "Melamud", |
| "suffix": "" |
| }, |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Goldberger", |
| "suffix": "" |
| }, |
| { |
| "first": "Ido", |
| "middle": [], |
| "last": "Dagan", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proc. of CONLL", |
| "volume": "2", |
| "issue": "", |
| "pages": "51--61", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oren Melamud, Jacob Goldberger, and Ido Dagan. 2016. context2vec: Learning Generic Context Em- bedding with Bidirectional LSTM. In Proc. of CONLL. pages 51-61.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Wikify! Linking documents to encyclopedic knowledge", |
| "authors": [ |
| { |
| "first": "Rada", |
| "middle": [], |
| "last": "Mihalcea", |
| "suffix": "" |
| }, |
| { |
| "first": "Andras", |
| "middle": [], |
| "last": "Csomai", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the Sixteenth ACM Conference on Information and Knowledge management", |
| "volume": "", |
| "issue": "", |
| "pages": "233--242", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rada Mihalcea and Andras Csomai. 2007. Wikify! Linking documents to encyclopedic knowledge. In Proceedings of the Sixteenth ACM Conference on Information and Knowledge management. Lisbon, Portugal, pages 233-242.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Efficient estimation of word representations in vector space", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. 2013. Efficient estimation of word represen- tations in vector space. CoRR abs/1301.3781.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Wordnet: a lexical database for english", |
| "authors": [ |
| { |
| "first": "George", |
| "middle": [ |
| "A" |
| ], |
| "last": "Miller", |
| "suffix": "" |
| } |
| ], |
| "year": 1995, |
| "venue": "Communications of the ACM", |
| "volume": "38", |
| "issue": "11", |
| "pages": "39--41", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "George A Miller. 1995. Wordnet: a lexical database for english. Communications of the ACM 38(11):39- 41.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "A semantic concordance", |
| "authors": [ |
| { |
| "first": "George", |
| "middle": [ |
| "A" |
| ], |
| "last": "Miller", |
| "suffix": "" |
| }, |
| { |
| "first": "Claudia", |
| "middle": [], |
| "last": "Leacock", |
| "suffix": "" |
| }, |
| { |
| "first": "Randee", |
| "middle": [], |
| "last": "Tengi", |
| "suffix": "" |
| }, |
| { |
| "first": "Ross", |
| "middle": [], |
| "last": "Bunker", |
| "suffix": "" |
| } |
| ], |
| "year": 1993, |
| "venue": "Proceedings of the 3rd DARPA Workshop on Human Language Technology", |
| "volume": "", |
| "issue": "", |
| "pages": "303--308", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "George A. Miller, Claudia Leacock, Randee Tengi, and Ross Bunker. 1993. A semantic concordance. In Proceedings of the 3rd DARPA Workshop on Human Language Technology. Plainsboro, N.J., pages 303- 308.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Entity Linking meets Word Sense Disambiguation: a Unified Approach", |
| "authors": [ |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Moro", |
| "suffix": "" |
| }, |
| { |
| "first": "Alessandro", |
| "middle": [], |
| "last": "Raganato", |
| "suffix": "" |
| }, |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "TACL", |
| "volume": "2", |
| "issue": "", |
| "pages": "231--244", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrea Moro, Alessandro Raganato, and Roberto Nav- igli. 2014. Entity Linking meets Word Sense Disam- biguation: a Unified Approach. TACL 2:231-244.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Semantic Specialisation of Distributional Word Vector Spaces using Monolingual and Cross-Lingual Constraints", |
| "authors": [ |
| { |
| "first": "Nikola", |
| "middle": [], |
| "last": "Mrksic", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Diarmuid", |
| "middle": [ |
| "\u00d3" |
| ], |
| "last": "S\u00e9aghdha", |
| "suffix": "" |
| }, |
| { |
| "first": "Ira", |
| "middle": [], |
| "last": "Leviant", |
| "suffix": "" |
| }, |
| { |
| "first": "Roi", |
| "middle": [], |
| "last": "Reichart", |
| "suffix": "" |
| }, |
| { |
| "first": "Milica", |
| "middle": [], |
| "last": "Gai", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "" |
| }, |
| { |
| "first": "Steve", |
| "middle": [], |
| "last": "Young", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nikola Mrksic, Ivan Vuli\u0107, Diarmuid\u00d3 S\u00e9aghdha, Ira Leviant, Roi Reichart, Milica Gai, Anna Korhonen, and Steve Young. 2017. Semantic Specialisation of Distributional Word Vector Spaces using Monolin- gual and Cross-Lingual Constraints. TACL .", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Word Sense Disambiguation: A survey", |
| "authors": [ |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "ACM Computing Surveys", |
| "volume": "41", |
| "issue": "2", |
| "pages": "1--69", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Roberto Navigli. 2009. Word Sense Disambiguation: A survey. ACM Computing Surveys 41(2):1-69.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "SemEval-2013 Task 12: Multilingual Word Sense Disambiguation", |
| "authors": [ |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Jurgens", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniele", |
| "middle": [], |
| "last": "Vannella", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of SemEval 2013", |
| "volume": "", |
| "issue": "", |
| "pages": "222--231", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Roberto Navigli, David Jurgens, and Daniele Vannella. 2013. SemEval-2013 Task 12: Multilingual Word Sense Disambiguation. In Proceedings of SemEval 2013. pages 222-231.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "BabelNet: The automatic construction, evaluation and application of a wide-coverage multilingual semantic network", |
| "authors": [ |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| }, |
| { |
| "first": "Simone", |
| "middle": [ |
| "Paolo" |
| ], |
| "last": "Ponzetto", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "AIJ", |
| "volume": "193", |
| "issue": "", |
| "pages": "217--250", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Roberto Navigli and Simone Paolo Ponzetto. 2012. BabelNet: The automatic construction, evaluation and application of a wide-coverage multilingual se- mantic network. AIJ 193:217-250.", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "Efficient nonparametric estimation of multiple embeddings per word in vector space", |
| "authors": [ |
| { |
| "first": "Arvind", |
| "middle": [], |
| "last": "Neelakantan", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeevan", |
| "middle": [], |
| "last": "Shankar", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandre", |
| "middle": [], |
| "last": "Passos", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Mccallum", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "1059--1069", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Arvind Neelakantan, Jeevan Shankar, Alexandre Pas- sos, and Andrew McCallum. 2014. Efficient non- parametric estimation of multiple embeddings per word in vector space. In Proceedings of EMNLP. Doha, Qatar, pages 1059-1069.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "Integrating distributional lexical contrast into word embeddings for antonymsynonym distinction", |
| "authors": [ |
| { |
| "first": "Kim Anh", |
| "middle": [], |
| "last": "Nguyen", |
| "suffix": "" |
| }, |
| { |
| "first": "Sabine", |
| "middle": [], |
| "last": "Schulte im Walde", |
| "suffix": "" |
| }, |
| { |
| "first": "Ngoc", |
| "middle": [ |
| "Thang" |
| ], |
| "last": "Vu", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "454--459", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kim Anh Nguyen, Sabine Schulte im Walde, and Ngoc Thang Vu. 2016. Integrating distributional lexical contrast into word embeddings for antonym- synonym distinction. In Proceedings of ACL. pages 454-459.", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "Making fine-grained and coarse-grained sense distinctions, both manually and automatically", |
| "authors": [ |
| { |
| "first": "Martha", |
| "middle": [], |
| "last": "Palmer", |
| "suffix": "" |
| }, |
| { |
| "first": "Hoa", |
| "middle": [], |
| "last": "Dang", |
| "suffix": "" |
| }, |
| { |
| "first": "Christiane", |
| "middle": [], |
| "last": "Fellbaum", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Natural Language Engineering", |
| "volume": "13", |
| "issue": "2", |
| "pages": "137--163", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Martha Palmer, Hoa Dang, and Christiane Fellbaum. 2007. Making fine-grained and coarse-grained sense distinctions, both manually and automatically. Natural Language Engineering 13(2):137-163.", |
| "links": null |
| }, |
| "BIBREF45": { |
| "ref_id": "b45", |
| "title": "Unsupervised does not mean uninterpretable: The case for word sense induction and disambiguation", |
| "authors": [ |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Panchenko", |
| "suffix": "" |
| }, |
| { |
| "first": "Eugen", |
| "middle": [], |
| "last": "Ruppert", |
| "suffix": "" |
| }, |
| { |
| "first": "Stefano", |
| "middle": [], |
| "last": "Faralli", |
| "suffix": "" |
| }, |
| { |
| "first": "Simone", |
| "middle": [ |
| "Paolo" |
| ], |
| "last": "Ponzetto", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Biemann", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of EACL", |
| "volume": "", |
| "issue": "", |
| "pages": "86--98", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexander Panchenko, Eugen Ruppert, Stefano Faralli, Simone Paolo Ponzetto, and Chris Biemann. 2017. Unsupervised does not mean uninterpretable: The case for word sense induction and disambiguation. In Proceedings of EACL. pages 86-98.", |
| "links": null |
| }, |
| "BIBREF46": { |
| "ref_id": "b46", |
| "title": "GloVe: Global vectors for word representation", |
| "authors": [ |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Pennington", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher D", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "1532--1543", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeffrey Pennington, Richard Socher, and Christopher D Manning. 2014. GloVe: Global vectors for word representation. In Proceedings of EMNLP. pages 1532-1543.", |
| "links": null |
| }, |
| "BIBREF47": { |
| "ref_id": "b47", |
| "title": "A multitask objective to inject lexical contrast into distributional semantics", |
| "authors": [ |
| { |
| "first": "Nghia The", |
| "middle": [], |
| "last": "Pham", |
| "suffix": "" |
| }, |
| { |
| "first": "Angeliki", |
| "middle": [], |
| "last": "Lazaridou", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Baroni", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "21--26", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nghia The Pham, Angeliki Lazaridou, and Marco Ba- roni. 2015. A multitask objective to inject lexical contrast into distributional semantics. In Proceed- ings of ACL. pages 21-26.", |
| "links": null |
| }, |
| "BIBREF48": { |
| "ref_id": "b48", |
| "title": "Towards a Seamless Integration of Word Senses into Downstream NLP Applications", |
| "authors": [ |
| { |
| "first": "Mohammad Taher", |
| "middle": [], |
| "last": "Pilehvar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jose", |
| "middle": [], |
| "last": "Camacho-Collados", |
| "suffix": "" |
| }, |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| }, |
| { |
| "first": "Nigel", |
| "middle": [], |
| "last": "Collier", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mohammad Taher Pilehvar, Jose Camacho-Collados, Roberto Navigli, and Nigel Collier. 2017. Towards a Seamless Integration of Word Senses into Down- stream NLP Applications. In Proceedings of ACL. Vancouver, Canada.", |
| "links": null |
| }, |
| "BIBREF49": { |
| "ref_id": "b49", |
| "title": "De-conflated semantic representations", |
| "authors": [ |
| { |
| "first": "Mohammad", |
| "middle": [], |
| "last": "Taher Pilehvar", |
| "suffix": "" |
| }, |
| { |
| "first": "Nigel", |
| "middle": [], |
| "last": "Collier", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mohammad Taher Pilehvar and Nigel Collier. 2016. De-conflated semantic representations. In Proceed- ings of EMNLP. Austin, TX.", |
| "links": null |
| }, |
| "BIBREF50": { |
| "ref_id": "b50", |
| "title": "SemEval-2007 task-17: English lexical sample, SRL and all words", |
| "authors": [ |
| { |
| "first": "Sameer", |
| "middle": [], |
| "last": "Pradhan", |
| "suffix": "" |
| }, |
| { |
| "first": "Edward", |
| "middle": [], |
| "last": "Loper", |
| "suffix": "" |
| }, |
| { |
| "first": "Dmitriy", |
| "middle": [], |
| "last": "Dligach", |
| "suffix": "" |
| }, |
| { |
| "first": "Martha", |
| "middle": [], |
| "last": "Palmer", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of SemEval", |
| "volume": "", |
| "issue": "", |
| "pages": "87--92", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sameer Pradhan, Edward Loper, Dmitriy Dligach, and Martha Palmer. 2007. SemEval-2007 task-17: En- glish lexical sample, SRL and all words. In Pro- ceedings of SemEval. pages 87-92.", |
| "links": null |
| }, |
| "BIBREF51": { |
| "ref_id": "b51", |
| "title": "Contextdependent sense embedding", |
| "authors": [ |
| { |
| "first": "Lin", |
| "middle": [], |
| "last": "Qiu", |
| "suffix": "" |
| }, |
| { |
| "first": "Kewei", |
| "middle": [], |
| "last": "Tu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yong", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "183--191", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lin Qiu, Kewei Tu, and Yong Yu. 2016. Context- dependent sense embedding. In Proceedings of EMNLP. Austin, Texas, pages 183-191.", |
| "links": null |
| }, |
| "BIBREF52": { |
| "ref_id": "b52", |
| "title": "Word Sense Disambiguation: A Unified Evaluation Framework and Empirical Comparison", |
| "authors": [ |
| { |
| "first": "Alessandro", |
| "middle": [], |
| "last": "Raganato", |
| "suffix": "" |
| }, |
| { |
| "first": "Jose", |
| "middle": [], |
| "last": "Camacho-Collados", |
| "suffix": "" |
| }, |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of EACL", |
| "volume": "", |
| "issue": "", |
| "pages": "99--110", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alessandro Raganato, Jose Camacho-Collados, and Roberto Navigli. 2017. Word Sense Disambigua- tion: A Unified Evaluation Framework and Empir- ical Comparison. In Proceedings of EACL. pages 99-110.", |
| "links": null |
| }, |
| "BIBREF53": { |
| "ref_id": "b53", |
| "title": "Multi-prototype vector-space models of word meaning", |
| "authors": [ |
| { |
| "first": "Joseph", |
| "middle": [], |
| "last": "Reisinger", |
| "suffix": "" |
| }, |
| { |
| "first": "Raymond", |
| "middle": [ |
| "J" |
| ], |
| "last": "Mooney", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "109--117", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joseph Reisinger and Raymond J. Mooney. 2010. Multi-prototype vector-space models of word mean- ing. In Proceedings of ACL. pages 109-117.", |
| "links": null |
| }, |
| "BIBREF54": { |
| "ref_id": "b54", |
| "title": "Using information content to evaluate semantic similarity in a taxonomy", |
| "authors": [ |
| { |
| "first": "Philip", |
| "middle": [], |
| "last": "Resnik", |
| "suffix": "" |
| } |
| ], |
| "year": 1995, |
| "venue": "Proceedings of IJCAI", |
| "volume": "", |
| "issue": "", |
| "pages": "448--453", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philip Resnik. 1995. Using information content to evaluate semantic similarity in a taxonomy. In Pro- ceedings of IJCAI. pages 448-453.", |
| "links": null |
| }, |
| "BIBREF55": { |
| "ref_id": "b55", |
| "title": "AutoExtend: Extending Word Embeddings to Embeddings for Synsets and Lexemes", |
| "authors": [ |
| { |
| "first": "Sascha", |
| "middle": [], |
| "last": "Rothe", |
| "suffix": "" |
| }, |
| { |
| "first": "Hinrich", |
| "middle": [], |
| "last": "Sch\u00fctze", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "1793--1803", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sascha Rothe and Hinrich Sch\u00fctze. 2015. AutoEx- tend: Extending Word Embeddings to Embeddings for Synsets and Lexemes. In Proceedings of ACL. Beijing, China, pages 1793-1803.", |
| "links": null |
| }, |
| "BIBREF56": { |
| "ref_id": "b56", |
| "title": "Contextual correlates of synonymy", |
| "authors": [ |
| { |
| "first": "Herbert", |
| "middle": [], |
| "last": "Rubenstein", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [ |
| "B" |
| ], |
| "last": "Goodenough", |
| "suffix": "" |
| } |
| ], |
| "year": 1965, |
| "venue": "Commun. ACM", |
| "volume": "8", |
| "issue": "10", |
| "pages": "627--633", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Herbert Rubenstein and John B. Goodenough. 1965. Contextual correlates of synonymy. Commun. ACM 8(10):627-633.", |
| "links": null |
| }, |
| "BIBREF57": { |
| "ref_id": "b57", |
| "title": "Automatic word sense discrimination", |
| "authors": [ |
| { |
| "first": "Hinrich", |
| "middle": [], |
| "last": "Sch\u00fctze", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "Computational linguistics", |
| "volume": "24", |
| "issue": "1", |
| "pages": "97--123", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hinrich Sch\u00fctze. 1998. Automatic word sense discrim- ination. Computational linguistics 24(1):97-123.", |
| "links": null |
| }, |
| "BIBREF58": { |
| "ref_id": "b58", |
| "title": "Symmetric pattern based word embeddings for improved word similarity prediction", |
| "authors": [ |
| { |
| "first": "Roy", |
| "middle": [], |
| "last": "Schwartz", |
| "suffix": "" |
| }, |
| { |
| "first": "Roi", |
| "middle": [], |
| "last": "Reichart", |
| "suffix": "" |
| }, |
| { |
| "first": "Ari", |
| "middle": [], |
| "last": "Rappoport", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of CoNLL", |
| "volume": "", |
| "issue": "", |
| "pages": "258--267", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Roy Schwartz, Roi Reichart, and Ari Rappoport. 2015. Symmetric pattern based word embeddings for im- proved word similarity prediction. In Proceedings of CoNLL. pages 258-267.", |
| "links": null |
| }, |
| "BIBREF59": { |
| "ref_id": "b59", |
| "title": "Conceptnet at semeval-2017 task 2: Extending word embeddings with multilingual relational knowledge", |
| "authors": [ |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Speer", |
| "suffix": "" |
| }, |
| { |
| "first": "Joanna", |
| "middle": [], |
| "last": "Lowry-Duda", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 11th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "76--80", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Robert Speer and Joanna Lowry-Duda. 2017. Con- ceptnet at semeval-2017 task 2: Extending word em- beddings with multilingual relational knowledge. In Proceedings of the 11th International Workshop on Semantic Evaluation (SemEval-2017). pages 76-80.", |
| "links": null |
| }, |
| "BIBREF60": { |
| "ref_id": "b60", |
| "title": "Bilingual learning of multi-sense embeddings with discrete autoencoders", |
| "authors": [ |
| { |
| "first": "Simon", |
| "middle": [], |
| "last": "\u0160uster", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Titov", |
| "suffix": "" |
| }, |
| { |
| "first": "Gertjan", |
| "middle": [], |
| "last": "van Noord", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of NAACL-HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "1346--1356", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Simon\u0160uster, Ivan Titov, and Gertjan van Noord. 2016. Bilingual learning of multi-sense embeddings with discrete autoencoders. In Proceedings of NAACL- HLT. pages 1346-1356.", |
| "links": null |
| }, |
| "BIBREF61": { |
| "ref_id": "b61", |
| "title": "A probabilistic model for learning multi-prototype word embeddings", |
| "authors": [ |
| { |
| "first": "Fei", |
| "middle": [], |
| "last": "Tian", |
| "suffix": "" |
| }, |
| { |
| "first": "Hanjun", |
| "middle": [], |
| "last": "Dai", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiang", |
| "middle": [], |
| "last": "Bian", |
| "suffix": "" |
| }, |
| { |
| "first": "Bin", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Rui", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Enhong", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Tie-Yan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of COLING", |
| "volume": "", |
| "issue": "", |
| "pages": "151--160", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fei Tian, Hanjun Dai, Jiang Bian, Bin Gao, Rui Zhang, Enhong Chen, and Tie-Yan Liu. 2014. A probabilis- tic model for learning multi-prototype word embed- dings. In Proceedings of COLING. pages 151-160.", |
| "links": null |
| }, |
| "BIBREF62": { |
| "ref_id": "b62", |
| "title": "K-embeddings: Learning conceptual embeddings for words using context", |
| "authors": [ |
| { |
| "first": "Thuy", |
| "middle": [], |
| "last": "Vu", |
| "suffix": "" |
| }, |
| { |
| "first": "D Stott", |
| "middle": [], |
| "last": "Parker", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of NAACL-HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "1262--1267", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thuy Vu and D Stott Parker. 2016. K-embeddings: Learning conceptual embeddings for words using context. In Proceedings of NAACL-HLT. pages 1262-1267.", |
| "links": null |
| }, |
| "BIBREF63": { |
| "ref_id": "b63", |
| "title": "Knowledge graph and text jointly embedding", |
| "authors": [ |
| { |
| "first": "Zhen", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianwen", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianlin", |
| "middle": [], |
| "last": "Feng", |
| "suffix": "" |
| }, |
| { |
| "first": "Zheng", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "1591--1601", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhen Wang, Jianwen Zhang, Jianlin Feng, and Zheng Chen. 2014. Knowledge graph and text jointly em- bedding. In Proceedings of EMNLP. pages 1591- 1601.", |
| "links": null |
| }, |
| "BIBREF64": { |
| "ref_id": "b64", |
| "title": "Structured training for neural network transition-based parsing", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Weiss", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Alberti", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Collins", |
| "suffix": "" |
| }, |
| { |
| "first": "Slav", |
| "middle": [], |
| "last": "Petrov", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "323--333", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Weiss, Chris Alberti, Michael Collins, and Slav Petrov. 2015. Structured training for neural network transition-based parsing. In Proceedings of ACL. Beijing, China, pages 323-333.", |
| "links": null |
| }, |
| "BIBREF65": { |
| "ref_id": "b65", |
| "title": "Improving lexical embeddings with semantic knowledge", |
| "authors": [ |
| { |
| "first": "Mo", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Dredze", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "545--550", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mo Yu and Mark Dredze. 2014. Improving lexical em- beddings with semantic knowledge. In Proceedings of ACL (2). pages 545-550.", |
| "links": null |
| }, |
| "BIBREF66": { |
| "ref_id": "b66", |
| "title": "It Makes Sense: A wide-coverage Word Sense Disambiguation system for free text", |
| "authors": [ |
| { |
| "first": "Zhi", |
| "middle": [], |
| "last": "Zhong", |
| "suffix": "" |
| }, |
| { |
| "first": "Hwee Tou", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proc. of ACL System Demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "78--83", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhi Zhong and Hwee Tou Ng. 2010. It Makes Sense: A wide-coverage Word Sense Disambiguation system for free text. In Proc. of ACL System Demonstra- tions. pages 78-83.", |
| "links": null |
| }, |
| "BIBREF67": { |
| "ref_id": "b67", |
| "title": "Bilingual word embeddings for phrase-based machine translation", |
| "authors": [ |
| { |
| "first": "Will", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Zou", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [ |
| "M" |
| ], |
| "last": "Cer", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher D", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "1393--1398", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Will Y. Zou, Richard Socher, Daniel M Cer, and Christopher D Manning. 2013. Bilingual word em- beddings for phrase-based machine translation. In Proceedings of EMNLP. pages 1393-1398.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF0": { |
| "html": null, |
| "num": null, |
| "content": "<table><tr><td/><td/><td/><td/><td/><td colspan=\"2\">Output</td><td/><td/><td/><td/><td/></tr><tr><td/><td colspan=\"2\">Words</td><td/><td/><td colspan=\"2\">Senses</td><td/><td/><td>Both</td><td/><td/></tr><tr><td colspan=\"2\">WS-Sim</td><td colspan=\"2\">RG-65</td><td colspan=\"2\">WS-Sim</td><td colspan=\"2\">RG-65</td><td colspan=\"2\">WS-Sim</td><td colspan=\"2\">RG-65</td></tr><tr><td>r</td><td>\u03c1</td><td>r</td><td>\u03c1</td><td>r</td><td>\u03c1</td><td>r</td><td>\u03c1</td><td>r</td><td>\u03c1</td><td>r</td><td>\u03c1</td></tr></table>", |
| "text": "InputWords0.49 0.48 0.65 0.66 0.56 0.56 0.67 0.67 0.54 0.53 0.66 0.65 Senses 0.69 0.69 0.70 0.71 0.69 0.70 0.70 0.74 0.72 0.71 0.71 0.74 Both 0.60 0.65 0.67 0.70 0.62 0.65 0.66 0.67 0.65 0.71 0.68 0.70", |
| "type_str": "table" |
| }, |
| "TABREF1": { |
| "html": null, |
| "num": null, |
| "content": "<table><tr><td colspan=\"5\">: Pearson (r) and Spearman (\u03c1) correlation performance of the nine configurations of SW2V</td></tr><tr><td/><td colspan=\"2\">WS-Sim</td><td colspan=\"2\">RG-65</td></tr><tr><td/><td>r</td><td>\u03c1</td><td>r</td><td>\u03c1</td></tr><tr><td>Shallow</td><td colspan=\"4\">0.72 0.71 0.71 0.74</td></tr><tr><td>Babelfy</td><td colspan=\"4\">0.65 0.63 0.69 0.70</td></tr><tr><td colspan=\"5\">Babelfy* 0.63 0.61 0.65 0.64</td></tr></table>", |
| "text": "", |
| "type_str": "table" |
| }, |
| "TABREF5": { |
| "html": null, |
| "num": null, |
| "content": "<table/>", |
| "text": "F-Measure percentage of different MCS strategies on the SemEval-2007 and SemEval-2013 WSD datasets.", |
| "type_str": "table" |
| } |
| } |
| } |
| } |