| { |
| "paper_id": "C14-1048", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T12:21:55.405536Z" |
| }, |
| "title": "Learning Sense-specific Word Embeddings By Exploiting Bilingual Resources", |
| "authors": [ |
| { |
| "first": "Jiang", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Harbin Institute of Technology", |
| "location": { |
| "country": "China" |
| } |
| }, |
| "email": "jguo@ir.hit.edu.cn" |
| }, |
| { |
| "first": "Wanxiang", |
| "middle": [], |
| "last": "Che", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Harbin Institute of Technology", |
| "location": { |
| "country": "China" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Haifeng", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Baidu Inc", |
| "location": { |
| "settlement": "Beijing", |
| "country": "China" |
| } |
| }, |
| "email": "wanghaifeng@baidu.com" |
| }, |
| { |
| "first": "Ting", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Harbin Institute of Technology", |
| "location": { |
| "country": "China" |
| } |
| }, |
| "email": "tliu@ir.hit.edu.cn" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Recent work has shown success in learning word embeddings with neural network language models (NNLM). However, the majority of previous NNLMs represent each word with a single embedding, which fails to capture polysemy. In this paper, we address this problem by representing words with multiple and sense-specific embeddings, which are learned from bilingual parallel data. We evaluate our embeddings using the word similarity measurement and show that our approach is significantly better in capturing the sense-level word similarities. We further feed our embeddings as features in Chinese named entity recognition and obtain noticeable improvements against single embeddings.", |
| "pdf_parse": { |
| "paper_id": "C14-1048", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Recent work has shown success in learning word embeddings with neural network language models (NNLM). However, the majority of previous NNLMs represent each word with a single embedding, which fails to capture polysemy. In this paper, we address this problem by representing words with multiple and sense-specific embeddings, which are learned from bilingual parallel data. We evaluate our embeddings using the word similarity measurement and show that our approach is significantly better in capturing the sense-level word similarities. We further feed our embeddings as features in Chinese named entity recognition and obtain noticeable improvements against single embeddings.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Word embeddings are conventionally defined as compact, real-valued, and low-dimensional vector representations for words. Each dimension of word embedding represents a latent feature of the word, hopefully capturing useful syntactic and semantic characteristics. Word embeddings can be used straightforwardly for computing word similarities, which benefits many practical applications (Socher et al., 2011; Mikolov et al., 2013a) . They are also shown to be effective as input to NLP systems (Collobert et al., 2011) or as features in various NLP tasks (Turian et al., 2010; Yu et al., 2013) .", |
| "cite_spans": [ |
| { |
| "start": 385, |
| "end": 406, |
| "text": "(Socher et al., 2011;", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 407, |
| "end": 429, |
| "text": "Mikolov et al., 2013a)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 492, |
| "end": 516, |
| "text": "(Collobert et al., 2011)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 553, |
| "end": 574, |
| "text": "(Turian et al., 2010;", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 575, |
| "end": 591, |
| "text": "Yu et al., 2013)", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In recent years, neural network language models (NNLMs) have become popular architectures for learning word embeddings (Bengio et al., 2003; Mnih and Hinton, 2008; Mikolov et al., 2013b) . Most of the previous NNLMs represent each word with a single embedding, which ignores polysemy. In an attempt to better capture the multiple senses or usages of a word, several multi-prototype models have been proposed (Reisinger and Mooney, 2010; Huang et al., 2012) . These multi-prototype models simply induce K prototypes (embeddings) for every word in the vocabulary, where K is predefined as a fixed value. These models still may not capture the real senses of words, because different words may have different number of senses.", |
| "cite_spans": [ |
| { |
| "start": 119, |
| "end": 140, |
| "text": "(Bengio et al., 2003;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 141, |
| "end": 163, |
| "text": "Mnih and Hinton, 2008;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 164, |
| "end": 186, |
| "text": "Mikolov et al., 2013b)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 408, |
| "end": 436, |
| "text": "(Reisinger and Mooney, 2010;", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 437, |
| "end": 456, |
| "text": "Huang et al., 2012)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We present a novel and simple method of learning sense-specific word embeddings by using bilingual parallel data. In this method, word sense induction (WSI) is performed prior to the training of NNLMs. We exploit bilingual parallel data for WSI, which is motivated by the intuition that the same word in the source language with different senses is supposed to have different translations in the foreign language. 1 For instance, \u5236\u670d can be translated as investment / overpower / subdue / subjugate / uniform, etc. Among all of these translations, subdue / overpower / subjugate express the same sense of \u5236\u670d, whereas uniform / investment express a different sense. Therefore, we could effectively obtain the senses of one word by clustering its translation words, exhibiting different senses in different clusters.", |
| "cite_spans": [ |
| { |
| "start": 414, |
| "end": 415, |
| "text": "1", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The created clusters are then projected back into the words in the source language texts, forming a sense-labeled training data. The sense-labeled data are then trained with recurrent neural network langugae model (RNNLM) (Mikolov, 2012) , a kind of NNLM, to obtain sense-specific word embeddings. As a concrete example, Figure 1 illustrates the process of learning sense-specific embeddings. To evaluate the sense-specific word embeddings we have learned, we manually construct a Chinese polysemous word similarity dataset that contains 401 pairs of words with human-judged similarities. The performance of our method on this dataset shows that sense-specific embeddings are significantly better in capturing the sense-level similarities for polysemous words.", |
| "cite_spans": [ |
| { |
| "start": 222, |
| "end": 237, |
| "text": "(Mikolov, 2012)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 321, |
| "end": 329, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2026 \u5236\u670d #2 , \u5f81\u670d \u6216 \u63a7\u5236 \u5bf9\u624b \u3002 1 \u8eab\u7a7f \u8be5 \u5382 \u5236\u670d #1 \u7684 \u5de5\u4eba \u2026 2 \u5979 \u5236\u670d #2 \u4e86 \u7a83\u8d3c \u3002 3 \u5728 \u6559\u5802 \uff0c \u4ed6\u4eec \u8eab\u7a7f \u7267\u5e08 \u7684 \u5236\u670d #1 \u3002 4 \u2026\u2026 5 \u2462 Project \u2460 Extract \u2463 RNNLM", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Monolingual sense-labeled data", |
| "sec_num": null |
| }, |
| { |
| "text": "We also evaluate our embeddings by feeding them as features to the task of Chinese named entity recognition (NER), which is a simple semi-supervised learning mechanism (Turian et al., 2010) . In order to use sense-specific embeddings as features, we should discriminate the word senses for the NER data first. Therefore, we further develop a novel monolingual word sense disambiguation (WSD) algorithm based on the RNNLM we have already trained previously. NER results show that sense-specific embeddings provide noticeable improvements over traditional single embeddings.", |
| "cite_spans": [ |
| { |
| "start": 168, |
| "end": 189, |
| "text": "(Turian et al., 2010)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Monolingual sense-labeled data", |
| "sec_num": null |
| }, |
| { |
| "text": "Our contribution in this paper is twofold:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Monolingual sense-labeled data", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 We propose a novel approach of learning sense-specific word embeddings by utilizing bilingual parallel data (Section 3). Evaluation on a manually constructed polysemous word similarity dataset shows that our approach better captures word similarities (Section 5.2).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Monolingual sense-labeled data", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 To use the sense-specific embeddings in practical applications, we develop a novel WSD algorithm for monolingual data based on RNNLM (Section 4). Using the algorithm, we feed the sense-specific embeddings as additional features to NER and achieve significant improvement (Section 5.3).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Monolingual sense-labeled data", |
| "sec_num": null |
| }, |
| { |
| "text": "2 Background: Word Embedding and RNNLM", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Monolingual sense-labeled data", |
| "sec_num": null |
| }, |
| { |
| "text": "There has been a line of research on learning word embeddings via NNLMs (Bengio et al., 2003; Mnih and Hinton, 2008; Mikolov et al., 2013b) . NNLMs are language models that exploit neural networks to make probabilistic predictions of the next word given preceding words. By training NNLMs, we obtain both high performance language models and word embeddings. Following Mikolov et al. (2013b) , we use the recurrent neural network as the basic framework for training NNLMs. RNNLM has achieved the state-of-the-art performance in language modeling (Mikolov, 2012) and learned effective word embeddings for several tasks (Mikolov et al., 2013b) . The architecture of RNNLM is shown in Figure 2 .", |
| "cite_spans": [ |
| { |
| "start": 72, |
| "end": 93, |
| "text": "(Bengio et al., 2003;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 94, |
| "end": 116, |
| "text": "Mnih and Hinton, 2008;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 117, |
| "end": 139, |
| "text": "Mikolov et al., 2013b)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 369, |
| "end": 391, |
| "text": "Mikolov et al. (2013b)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 546, |
| "end": 561, |
| "text": "(Mikolov, 2012)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 618, |
| "end": 641, |
| "text": "(Mikolov et al., 2013b)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 682, |
| "end": 690, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Monolingual sense-labeled data", |
| "sec_num": null |
| }, |
| { |
| "text": "The input layer of RNNLM consists of two components: w(t) and h(t \u2212 1). w(t) is the one-hot representation of the word at time step t, 2 h(t \u2212 1) is the output of hidden layer at the last time step. Therefore, the input encodes all previous history when predicting the next word at time step t. Compared with other feed-forward NNLMs, the RNNLM can theoretically represent longer context patterns. The output y(t) represents the probability distribution of the next word p(w(t + 1)|w(t), h(t \u2212 1)). The output values are computed as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Monolingual sense-labeled data", |
| "sec_num": null |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "h(t) = f (Uw(t) + Wh(t \u2212 1))", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Monolingual sense-labeled data", |
| "sec_num": null |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "y(t) = g(Vh(t))", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Monolingual sense-labeled data", |
| "sec_num": null |
| }, |
| { |
| "text": "where f is a sigmoid function and g is a softmax function. The RNNLM is trained by maximizing the log-likelihood of the training data using stochastic gradient descent (SGD), in which back propagation through time (BPTT) is used to efficiently compute the gradients. In the RNNLM, U is the embedding matrix, where each column vector represents a word.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Monolingual sense-labeled data", |
| "sec_num": null |
| }, |
| { |
| "text": "As discussed in Section 1, the RNNLM and even most NNLMs ignore the polysemy phenomenon in natural languages and induce a single embedding for each word. We address this issue and introduce an effective approach for capturing polysemy in the next section.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Monolingual sense-labeled data", |
| "sec_num": null |
| }, |
| { |
| "text": "In our approach, WSI is performed prior to the training of word embeddings. Inspired by Gale et al. (1992) and Chan and Ng (2005) , who used bilingual data for automatically generating training examples of WSD, we present a bilingual approach for unsupervised WSI, as shown in Figure 1 . First, we extract the translations of the source language words from bilingual data (x). Since there may be multiple translations for the same sense of a source language word, it is straightforward to cluster the translation words, exhibiting different senses in different clusters (y).", |
| "cite_spans": [ |
| { |
| "start": 88, |
| "end": 106, |
| "text": "Gale et al. (1992)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 111, |
| "end": 129, |
| "text": "Chan and Ng (2005)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 277, |
| "end": 285, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Sense-specific Word Embedding Learning", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Once word senses are effectively induced for each word, we are able to form the sense-labeled training data of RNNLMs by tagging each word occurrence in the source language text with its associated sense cluster (z). Finally, the sense-tagged corpus is used to train the sense-specific word embeddings in a standard manner ({).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sense-specific Word Embedding Learning", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Given bilingual data after word alignment, we present a way of extracting translation words for source language words by exploiting the translation probability produced by word alignment models (Brown et al., 1993; Och and Ney, 2003; Liang et al., 2006) .", |
| "cite_spans": [ |
| { |
| "start": 194, |
| "end": 214, |
| "text": "(Brown et al., 1993;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 215, |
| "end": 233, |
| "text": "Och and Ney, 2003;", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 234, |
| "end": 253, |
| "text": "Liang et al., 2006)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Translation Words Extraction", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "More formally, we notate the Chinese sentence as c = (c 1 , ..., c I ) and English sentence as e = (e 1 , ..., e J ). The alignment models can be generally factored as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Translation Words Extraction", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "p(c|e) = a p(a, c|e) (3) p(a, c|e) = J j=1 p d (a j |a j\u2212 , j)p t (c j |e a j )", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "Translation Words Extraction", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where a is the alignment specifying the position of an English word aligned to each Chinese word,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Translation Words Extraction", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "p d (a j |a j\u2212 , j)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Translation Words Extraction", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "is the distortion probability, and p t (c j |e a j ) is the translation probability which we use. Table 1 : Results of our approach on a sample of polysemous words. The second column lists the extracted translation words of the source language word (Section 3.1). The third column lists the clustering results using affinity propagation (Section 3.2). The last column lists the nearest neighbour words computed using the learned sense-specific word embeddings (Section 5.2.2).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 98, |
| "end": 105, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Translation Words Extraction", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "In this paper, we use the alignment model proposed by Liang et al. (2006) . We utilize the bidirectional translation probabilities for the extraction of translations, where a foreign language word w e is determined as a translation of source language word w c only if both translation probabilities p t (w c |w e ) and p t (w e |w c ) exceed some threshold 0 < \u03b4 < 1.", |
| "cite_spans": [ |
| { |
| "start": 54, |
| "end": 73, |
| "text": "Liang et al. (2006)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Translation Words Extraction", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The second column of Table 1 presents the extraction results on a sample of source language words with the corresponding translation words.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 21, |
| "end": 28, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Translation Words Extraction", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "For each source language word, its translation words are then clustered so as to separate different senses. At the clustering time, we first represent each translation word with a feature vector (point), so that we can measure the similarities between points. Then we perform clustering on these feature vectors, representing different senses in different clusters.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Clustering of Translation Words", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Different from Apidianaki (2008) who represents all occurrences of the translation words with their contexts in the foreign language for clustering, we adopt the embeddings of the translation words as the representations and directly perform clustering on the translation words, 3 rather than the contexts of occurrences. The embedding representation is chosen for two reasons: (1) Word embeddings encode rich lexical semantics. They can be directly used to measure word similarities. (2) Embedding representation of the translation words leads to extremely high-efficiency clustering, because the number of translation words is orders of magnitude less than their occurrences.", |
| "cite_spans": [ |
| { |
| "start": 15, |
| "end": 32, |
| "text": "Apidianaki (2008)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Clustering of Translation Words", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Moreover, since the number of senses of different source language words is varied, the commonlyused k-means algorithm becomes inappropriate for this situation. Instead, we employ affinity propagation (AP) algorithm (Frey and Dueck, 2007) for clustering. In AP, each cluster is represented by one of the samples of it, which we call an exemplar. AP finds the exemplars iteratively based on the concept of \"message passing\". AP has the major advantage that the number of the resulting clusters is dynamic, which mainly depends on the distribution of the data. Compared with other possible clustering approaches, such as hierarchical agglomerative clustering (Kartsaklis et al., 2013) , AP determines the number of resulting clusters automatically without using any partition criterions.", |
| "cite_spans": [ |
| { |
| "start": 215, |
| "end": 237, |
| "text": "(Frey and Dueck, 2007)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 656, |
| "end": 681, |
| "text": "(Kartsaklis et al., 2013)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Clustering of Translation Words", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The third column of Table 1 lists the resulting clusters of the translation words for the sampled polysemous words. We can see that the resulting clusters are meaningful: senses are well represented by clusters of translation words.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 20, |
| "end": 27, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Clustering of Translation Words", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The produced clusters are then projected back into the source language to identify word senses.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cross-lingual Word Sense Projection", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "For each occurrence w o of the word w in the source language corpora, we first select the aligned word with the highest marginal edge posterior (Liang et al., 2006) as its translation. We then identify the sense of w o by computing the similarities of its translation word with each exemplar of the clusters, and select the one with the maximum similarity. When w o is aligned with NULL, we heuristically identify its sense as the most frequent sense of w that appears in the bilingual dataset.", |
| "cite_spans": [ |
| { |
| "start": 144, |
| "end": 164, |
| "text": "(Liang et al., 2006)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cross-lingual Word Sense Projection", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "After projecting the word senses into the source language, we obtain a sense-labeled corpus, which is used to train the sense-specific word embeddings with RNNLM. The training process is exactly the same as single embeddings, except that the words in our training corpus has been labeled with senses.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cross-lingual Word Sense Projection", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "One of the attractive characteristic of word embeddings is that they can be directly used as word features in various NLP applications, including NER, chunking, etc. Despite of the usefulness of word embeddings on these applications, previous work seldom concerns that words may have multiple senses, which cannot be effectively represented with single embeddings. In this section, we address this problem by utilizing sense-specific word embeddings.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Application of Sense-specific Word Embeddings", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We take the task of Chinese NER as a case study. Intuitively, word senses are important in NER. For instance, \u7f8e is likely to be an NE of Location when it refers to America. However, when it expresses the sense of beautiful, it should not be an NE.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Application of Sense-specific Word Embeddings", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Using sense-specific word embedding features for NER is not as straightforward as using single embeddings. For each word in the NER data, we first need to determine the correct word sense of it, which is a typical WSD problem. Then we use the embedding which corresponds to that sense as features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Application of Sense-specific Word Embeddings", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Here we treat WSD as a sequence labeling problem, and solve it with a very natural algorithm based on RNNLM we have already trained (Section 3).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Application of Sense-specific Word Embeddings", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Given the automatically induced word sense inventories and the RNNLM which has already been trained on the sense-labeled data of source language, we first develop a greedy decoding algorithm for the sequential WSD, which works deterministically. Then we improve it using beam search.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "RNNLM-based Word Sense Disambiguation", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Greedy. For word w, we denote the sense-labeled w as w s k , where s k represents the k th sense of w. In each step, a single decision is made and the sense of next word (w(t + 1)) which has the maximum RNNLM output is chosen, given the current (sense-labeled) word w(t) s * and the hidden layer h(t \u2212 1) at the last time step as input. We simply need to compute a shortlist of y(t) associated with w(t + 1), that is, y(t)| w(t+1) at each step. This process is illustrated in Figure 3 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 476, |
| "end": 484, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "RNNLM-based Word Sense Disambiguation", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Beam search. The greedy procedure described above can be improved using a left-to-right beam search decoding for obtaining a better sequence. The beam-search decoding algorithm keeps B different sequences of decisions in the agenda, and the sequence with the best overall score is chosen as the final sense sequence.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "RNNLM-based Word Sense Disambiguation", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Note that the dynamic programming decoding (e.g. viterbi) is not applicable here, because of the recurrent characteristic of RNNLM. At each step, decisions made by RNNLM depends on all previous decisions instead of the previous state only, hence markov assumption is not satisfied.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "RNNLM-based Word Sense Disambiguation", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "The Chinese-English parallel datasets we use include LDC03E24, LDC04E12 (1998), the IWSLT 2008 evaluation campaign dataset and the PKU 863 parallel dataset. All corpora are sentence-aligned. After cleaning and filtering the corpus, 4 we obtain 918,681 pairs of sentences (21.7M words).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Settings", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "In this paper, we use BerkeleyAligner to produce word alignments over the parallel dataset. 5 Berke-leyAligner also gives translation probabilities and marginal edge posterior probabilities. We adopt the", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Settings", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "h(t-1) U W V h(t) Next word (sense-labeled) Last word (sense-labeled) w(t) s* w(t+1) s 1 y(t)| w(t+1) w(t+1) s 2 w(t+1) s K \u2026 y(t) \u2026 w(t+1) s* max w(t) s* w(t+1) s* w(t+1) s* w(t+2) s* w(t+2) s* w(t+3) s* h(t) h(t+1) h(t+2) h(t-1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Settings", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Shortlist \u2026 Figure 3 : Using RNNLM for WSD by sequential labeling (left). Decision at each step of the RNNLMbased WSD algorithm (right).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 12, |
| "end": 20, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experimental Settings", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "scikit-learn tool (Pedregosa et al., 2011) to implement the AP clustering algorithm. 6 The AP algorithm is not fully automatic in deciding the cluster number. There is a tunable parameter calls preference. A preference with a larger value encourages more clusters to be produced. We set the preference at the median value of the input similarity matrix to obtain a moderate number of clusters. The rnnlm toolkit developed by Mikolov et al. (2011) is used to train RNNLM and obtain word embeddings. 7 We induce both single and sense-specific embeddings with 50 dimensions. Finally, We obtain embeddings of a vocabulary of 217K words, with a proportion of 8.4% having multiple sense clusters.", |
| "cite_spans": [ |
| { |
| "start": 18, |
| "end": 42, |
| "text": "(Pedregosa et al., 2011)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 85, |
| "end": 86, |
| "text": "6", |
| "ref_id": null |
| }, |
| { |
| "start": 425, |
| "end": 446, |
| "text": "Mikolov et al. (2011)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 498, |
| "end": 499, |
| "text": "7", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Settings", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Word embeddings can be directly used for computing similarities between words, which benefits many practical applications. Therefore, we first evaluate our embeddings using a similarity measurement.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation on Word Similarity", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Word similarities are calculated using the MaxSim and AvgSim metric (Reisinger and Mooney, 2010) :", |
| "cite_spans": [ |
| { |
| "start": 68, |
| "end": 96, |
| "text": "(Reisinger and Mooney, 2010)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation on Word Similarity", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
"raw_str": "MaxSim(u, v) = \\max_{1 \\leq i \\leq k_u, 1 \\leq j \\leq k_v} s(u_i, v_j) \\quad (5) \\qquad AvgSim(u, v) = \\frac{1}{k_u \\times k_v} \\sum_{i=1}^{k_u} \\sum_{j=1}^{k_v} s(u_i, v_j)",
| "eq_num": "(6)" |
| } |
| ], |
| "section": "Evaluation on Word Similarity", |
| "sec_num": "5.2" |
| }, |
| { |
"text": "where k u and k v are the number of the induced senses for words u and v, respectively. s(\u2022, \u2022) can be any standard similarity measure. In this study, we use the cosine similarity. Previous works used the WordSim-353 dataset (Finkelstein et al., 2002) or the Chinese version (Jin and Wu, 2012) for the evaluation of general word similarity. These datasets rarely contain polysemous words, and thus are unsuitable for our evaluation. To the best of our knowledge, no datasets for polysemous word similarity evaluation have been published yet, either in English or Chinese. In order to fill this gap in the research community, we manually construct a Chinese polysemous word similarity dataset.",
| "cite_spans": [ |
| { |
| "start": 225, |
| "end": 251, |
| "text": "(Finkelstein et al., 2002)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 275, |
| "end": 293, |
| "text": "(Jin and Wu, 2012)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation on Word Similarity", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "We adopt the HowNet database (Dong and Dong, 2006) in constructing the dataset. HowNet is a Chinese knowledge database that maintains comprehensive semantic definitions for each word in Chinese. The process of the dataset construction includes three steps: (1) Commonly used polysemous words are extracted according to their sense definitions in HowNet. (2) For each polysemous word, we select several other words to form word pairs with it. (3) Each word pair is manually annotated with similarity.", |
| "cite_spans": [ |
| { |
| "start": 29, |
| "end": 50, |
| "text": "(Dong and Dong, 2006)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Chinese Polysemous Word Similarity Dataset Construction", |
| "sec_num": "5.2.1" |
| }, |
| { |
| "text": "In step (1), we mainly took advantage of HowNet for the selection of polysemous words. However, the synsets defined in HowNet are often too fine-grained and many of them are difficult to distinguish, particularly for non-experts. Therefore, we manually discard those words with senses that are hard to distinguish.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Chinese Polysemous Word Similarity Dataset Construction", |
| "sec_num": "5.2.1" |
| }, |
| { |
| "text": "In step (2), for each polysemous word w selected in step 1, we sample several other words to form word pairs with w. The sampled words can be roughly divided into two categories: related and unrelated. The related words are sampled manually. They can be the hypernym, hyponym, sibling, (near-)synonym, antonym, or topically related to one sense of w. The unrelated words are sampled randomly.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Chinese Polysemous Word Similarity Dataset Construction", |
| "sec_num": "5.2.1" |
| }, |
| { |
"text": "In step (3), we ask six graduate students who majored in computational linguistics to assign each word pair a similarity score. Following the setting of WordSim-353, we restrict the similarity score to the range (0.0, 10.0). To address the inconsistency of the annotations, we discard those word pairs with a standard deviation greater than 1.0. We end up with 401 word pairs annotated with acceptable consistency. Unlike the WordSim-353, in which most of the words are nouns, the words in our dataset are more diverse in terms of part-of-speech tags. Table 2 lists a sample of word pairs with annotated similarities from the dataset. The whole evaluation dataset will be publicly available for the research community. Table 2 : Sample word pairs of our dataset. The unrelated words are randomly sampled. Mean.Sim represents the mean similarity of the annotations, Std.Dev represents the standard deviation.",
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 552, |
| "end": 559, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 719, |
| "end": 726, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Chinese Polysemous Word Similarity Dataset Construction", |
| "sec_num": "5.2.1" |
| }, |
| { |
| "text": "Following Zou et al. (2013) , we use Spearman's \u03c1 correlation and Kendall's \u03c4 correlation for evaluation. The results are shown in Table 3 . By utilizing sense-specific embeddings, our approach significantly outperforms the single-version using either MaxSim or AvgSim measurement. For comparison with multi-prototype methods, we borrow the context-clustering idea from Huang et al. 2012, which was first presented by Sch\u00fctze (1998) . The occurrences of a word are represented by the average embeddings of its context words. Following Huang et al.'s settings, we use a context window of size 10 and all occurrences of a word are clustered using the spherical k-means algorithm, where k is tuned with a development set and finally set to 2.", |
| "cite_spans": [ |
| { |
| "start": 10, |
| "end": 27, |
| "text": "Zou et al. (2013)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 418, |
| "end": 432, |
| "text": "Sch\u00fctze (1998)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 131, |
| "end": 138, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Evaluation Results", |
| "sec_num": "5.2.2" |
| }, |
| { |
| "text": "MaxSim AvgSim \u03c1 \u00d7100 \u03c4 \u00d7100 \u03c1 \u00d7100 \u03c4 \u00d7100 Surprisingly, the multi-prototype method performs even slightly worse than the single-version, which suggests that learning a fixed number of embeddings for every word may even harm the embedding. Additionally, the clustering process of the multi-prototype approach suffers from high memory and time cost, especially for the high-frequency words.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System", |
| "sec_num": null |
| }, |
| { |
"text": "To obtain intuitive insight into the superior performance of sense-specific embeddings, we list in the last column of Table 1 the nearest neighbors of the sampled words in the evaluation dataset. The list shows that we are able to find the different meanings of a word by using sense-specific embeddings.",
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 118, |
| "end": 125, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "System", |
| "sec_num": null |
| }, |
| { |
| "text": "We further apply the sense-specific embeddings as features to Chinese NER. We first perform WSD on the NER data using the algorithm introduced in Section 4. For beam search decoding, the beam size B is tuned on a development set and is finally set to 16.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Application on Chinese NER", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "We conduct our experiments on data from People's Daily (Jan. and Jun. 1998). 9 The original corpus contains seven NE types. 10 In this study, we select the three most common NE types: Person, Location, Organization. The data from January are chosen as the training set (37,426 sentences). The first 2,000 sentences from June are chosen as the development set and the next 8,000 sentences as the test set.", |
| "cite_spans": [ |
| { |
| "start": 124, |
| "end": 126, |
| "text": "10", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Application on Chinese NER", |
| "sec_num": "5.3" |
| }, |
| { |
"text": "CRF models are used in our NER system and are optimized by L2-regularized SGD. We use the CRFSuite (Okazaki, 2007) because it accepts feature vectors with numerical values. The state-of-the-art features (Che et al., 2013) are used in our baseline system. For both single and sense-specific embedding features, we use a window size of 4 (two words before and two words after). Table 4 demonstrates the performance of NER on the test set. As desired, the single embedding features improve the performance of our baseline, which were also shown in (Turian et al., 2010) . Furthermore, the sense-specific embeddings outperform the single word embeddings by nearly 1% F-score (88.56 vs. 87.58), which is statistically significant (p-value < 0.01 using one-tail t-test). According to our hypothesis, the sense-specific embeddings should bring considerable improvements to the NER of polysemous words. To verify this, we evaluate the per-token accuracy of the polysemous words in the NER test data. We again adopt HowNet to determine the polysemy. Words that are defined with multiple senses are selected as test set. Figure 4 shows that the sense-specific embeddings indeed improve the NE recognition of the polysemous words, whereas the single embeddings even decrease the accuracy slightly. We also obtain improvements on the NE recognition of the monosemous words, which provide evidence that more accurate prediction of polysemous words is beneficial for the prediction of the monosemous words through contextual influence.",
| "cite_spans": [ |
| { |
| "start": 99, |
| "end": 114, |
| "text": "(Okazaki, 2007)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 203, |
| "end": 221, |
| "text": "(Che et al., 2013)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 545, |
| "end": 566, |
| "text": "(Turian et al., 2010)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 376, |
| "end": 383, |
| "text": "Table 4", |
| "ref_id": "TABREF6" |
| }, |
| { |
| "start": 1111, |
| "end": 1119, |
| "text": "Figure 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Application on Chinese NER", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Previous studies have explored the NNLMs, which predict the next word given some history or future words as context within a neural network architecture. Schwenk and Gauvain (2002) , Bengio et al. (2003) , Mnih and Hinton (2007) , and Collobert et al. (2011) proposed language models based on feedforward neural networks. Mikolov et al. (2010) studied language models based on RNN, which managed to represent longer history information for word-predicting and demonstrated outstanding performance.", |
| "cite_spans": [ |
| { |
| "start": 154, |
| "end": 180, |
| "text": "Schwenk and Gauvain (2002)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 183, |
| "end": 203, |
| "text": "Bengio et al. (2003)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 206, |
| "end": 228, |
| "text": "Mnih and Hinton (2007)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 235, |
| "end": 258, |
| "text": "Collobert et al. (2011)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 322, |
| "end": 343, |
| "text": "Mikolov et al. (2010)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Besides, researchers have also explored the word embeddings learned by NNLMs. Collobert et al. Figure 4 : Per-token accuracy on the polysemous and monosemous words in the NER test data. Polysemous(k) represents the set of words that have more than or equal to k senses defined in HowNet.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 95, |
| "end": 103, |
| "text": "Figure 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "are shown to capture many relational similarities, which can be recovered by vector arithmetic in the embedding space (Mikolov et al., 2013b; Fu et al., 2014) . Klementiev et al. (2012) and Zou et al. (2013) learned cross-lingual word embeddings by utilizing MT word alignments in bilingual parallel data to constrain translational equivalence. Most previous NNLMs induce single embedding for each word, ignoring the polysemous property of languages. In an attempt to capture the different senses or usage of a word, Reisinger and Mooney (2010) and Huang et al. (2012) proposed multi-prototype models for inducing multiple embeddings for each word. They did this by clustering the contexts of words. These multi-prototype models simply induced a fixed number of embeddings for every word, regardless of the real sense capacity of the specific word.", |
| "cite_spans": [ |
| { |
| "start": 118, |
| "end": 141, |
| "text": "(Mikolov et al., 2013b;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 142, |
| "end": 158, |
| "text": "Fu et al., 2014)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 161, |
| "end": 185, |
| "text": "Klementiev et al. (2012)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 190, |
| "end": 207, |
| "text": "Zou et al. (2013)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 517, |
| "end": 544, |
| "text": "Reisinger and Mooney (2010)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 549, |
| "end": 568, |
| "text": "Huang et al. (2012)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "There has been a lot of work on using bilingual resources for word sense disambiguation (Gale et al., 1992; Chan and Ng, 2005) . By using aligned bilingual data along with word sense inventories such as WordNet, training examples for WSD can be automatically gathered. We employ this idea for word sense induction in our study, which is free of any pre-defined word sense thesaurus.", |
| "cite_spans": [ |
| { |
| "start": 88, |
| "end": 107, |
| "text": "(Gale et al., 1992;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 108, |
| "end": 126, |
| "text": "Chan and Ng, 2005)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "6" |
| }, |
| { |
"text": "The most similar work to our sense induction method is Apidianaki (2008) . They presented a method of sense induction by clustering all occurrences of each word's translation words. In their approach, occurrences are represented with their contexts. We suggest that clustering contexts suffers from high memory and time cost, as well as data sparsity. In our method, by clustering the embeddings of translation words, we induce word senses much more efficiently.",
| "cite_spans": [ |
| { |
| "start": 55, |
| "end": 72, |
| "text": "Apidianaki (2008)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "6" |
| }, |
| { |
"text": "To evaluate word similarity models, researchers often apply a dataset with human-judged similarities on word pairs, such as WordSim-353 (Finkelstein et al., 2002) , MC (Miller and Charles, 1991) , RG (Rubenstein and Goodenough, 1965) and Jin and Wu (2012) . For context-based multi-prototype models, (Huang et al., 2012) constructs a dataset with context-dependent word similarity. To the best of our knowledge, there are no publicly available datasets for context-unaware polysemous word similarity evaluation yet. This paper fills this gap.",
| "cite_spans": [ |
| { |
| "start": 136, |
| "end": 162, |
| "text": "(Finkelstein et al., 2002)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 165, |
| "end": 194, |
| "text": "MC (Miller and Charles, 1991)", |
| "ref_id": null |
| }, |
| { |
| "start": 200, |
| "end": 233, |
| "text": "(Rubenstein and Goodenough, 1965)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 238, |
| "end": 255, |
| "text": "Jin and Wu (2012)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "6" |
| }, |
| { |
"text": "This paper presents a novel and effective approach of producing sense-specific word embeddings by exploiting bilingual parallel data. The proposed embeddings are expected to capture the multiple senses of polysemous words. Evaluation on a manually annotated Chinese polysemous word similarity dataset shows that the sense-specific embeddings significantly outperform the single embeddings and the multi-prototype approach.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "Another contribution of this study is the development of a beam-search decoding algorithm based on RNNLM for monolingual WSD. This algorithm bridges the proposed sense-specific embeddings and practical applications, where no bilingual information is provided. Experiments on Chinese NER show that the sense-specific embeddings indeed improve the performance, especially for the recognition of the polysemous words.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "In this paper, source language refers to Chinese, whereas foreign language refers to English. This work is licenced under a Creative Commons Attribution 4.0 International Licence. Page numbers and proceedings footer are added by the organisers. Licence details: http:// creativecommons.org/licenses/by/4.0/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "A feature vector of the same size of the vocabulary, and only one dimension is on.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "The publicly available word embeddings proposed byCollobert et al. (2011) are used.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Sentences that are too long (more than 40 words) or too short (less than 10 words) are discarded. 5 code.google.com/p/berkeleyaligner/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "scikit-learn.org 7 www.fit.vutbr.cz/\u02dcimikolov/rnnlm/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "ir.hit.edu.cn/\u02dcjguo", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "www.icl.pku.edu.cn/icl groups/corpus/dwldform1.asp 10 Person, Location, Organization, Date, Time, Number and Miscellany", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We are grateful to Dr. Zhenghua Li, Yue Zhang, Shiqi Zhao, Meishan Zhang and the anonymous reviewers for their insightful comments and suggestions. This work was supported by the National Key Basic Research Program of China via grant 2014CB340503 and 2014CB340505, the National Natural Science Foundation of China (NSFC) via grant 61370164.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Translation-oriented word sense induction based on parallel corpora", |
| "authors": [ |
| { |
| "first": "Marianna", |
| "middle": [], |
| "last": "Apidianaki", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of the 6th Conference on Language Resources and Evaluation (LREC)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marianna Apidianaki. 2008. Translation-oriented word sense induction based on parallel corpora. In In Proceed- ings of the 6th Conference on Language Resources and Evaluation (LREC), Marrakech, Morocco.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "A neural probabilistic language model", |
| "authors": [ |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "R\u00e9jean", |
| "middle": [], |
| "last": "Ducharme", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascal", |
| "middle": [], |
| "last": "Vincent", |
| "suffix": "" |
| }, |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Janvin", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "The Journal of Machine Learning Research", |
| "volume": "3", |
| "issue": "", |
| "pages": "1137--1155", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yoshua Bengio, R\u00e9jean Ducharme, Pascal Vincent, and Christian Janvin. 2003. A neural probabilistic language model. The Journal of Machine Learning Research, 3:1137-1155.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "The mathematics of statistical machine translation: Parameter estimation", |
| "authors": [ |
| { |
| "first": "Vincent J Della", |
| "middle": [], |
| "last": "Peter F Brown", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephen A Della", |
| "middle": [], |
| "last": "Pietra", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert L", |
| "middle": [], |
| "last": "Pietra", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mercer", |
| "suffix": "" |
| } |
| ], |
| "year": 1993, |
| "venue": "Computational linguistics", |
| "volume": "19", |
| "issue": "2", |
| "pages": "263--311", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter F Brown, Vincent J Della Pietra, Stephen A Della Pietra, and Robert L Mercer. 1993. The mathematics of statistical machine translation: Parameter estimation. Computational linguistics, 19(2):263-311.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Scaling up word sense disambiguation via parallel texts", |
| "authors": [ |
| { |
| "first": "Yee", |
| "middle": [], |
| "last": "Seng Chan", |
| "suffix": "" |
| }, |
| { |
| "first": "Hwee Tou", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "AAAI", |
| "volume": "5", |
| "issue": "", |
| "pages": "1037--1042", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yee Seng Chan and Hwee Tou Ng. 2005. Scaling up word sense disambiguation via parallel texts. In AAAI, volume 5, pages 1037-1042.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Named entity recognition with bilingual constraints", |
| "authors": [ |
| { |
| "first": "Wanxiang", |
| "middle": [], |
| "last": "Che", |
| "suffix": "" |
| }, |
| { |
| "first": "Mengqiu", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "Ting", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 2013 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "52--62", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wanxiang Che, Mengqiu Wang, Christopher D. Manning, and Ting Liu. 2013. Named entity recognition with bilingual constraints. In Proceedings of the 2013 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 52-62, Atlanta, Georgia, June.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Natural language processing (almost) from scratch", |
| "authors": [ |
| { |
| "first": "Ronan", |
| "middle": [], |
| "last": "Collobert", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| }, |
| { |
| "first": "L\u00e9on", |
| "middle": [], |
| "last": "Bottou", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Karlen", |
| "suffix": "" |
| }, |
| { |
| "first": "Koray", |
| "middle": [], |
| "last": "Kavukcuoglu", |
| "suffix": "" |
| }, |
| { |
| "first": "Pavel", |
| "middle": [], |
| "last": "Kuksa", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "The Journal of Machine Learning Research", |
| "volume": "12", |
| "issue": "", |
| "pages": "2493--2537", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ronan Collobert, Jason Weston, L\u00e9on Bottou, Michael Karlen, Koray Kavukcuoglu, and Pavel Kuksa. 2011. Natural language processing (almost) from scratch. The Journal of Machine Learning Research, 12:2493-2537.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "HowNet and the Computation of Meaning", |
| "authors": [ |
| { |
| "first": "Zhendong", |
| "middle": [], |
| "last": "Dong", |
| "suffix": "" |
| }, |
| { |
| "first": "Qiang", |
| "middle": [], |
| "last": "Dong", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhendong Dong and Qiang Dong. 2006. HowNet and the Computation of Meaning. World Scientific.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Placing search in context: The concept revisited", |
| "authors": [ |
| { |
| "first": "Lev", |
| "middle": [], |
| "last": "Finkelstein", |
| "suffix": "" |
| }, |
| { |
| "first": "Evgeniy", |
| "middle": [], |
| "last": "Gabrilovich", |
| "suffix": "" |
| }, |
| { |
| "first": "Yossi", |
| "middle": [], |
| "last": "Matias", |
| "suffix": "" |
| }, |
| { |
| "first": "Ehud", |
| "middle": [], |
| "last": "Rivlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Zach", |
| "middle": [], |
| "last": "Solan", |
| "suffix": "" |
| }, |
| { |
| "first": "Gadi", |
| "middle": [], |
| "last": "Wolfman", |
| "suffix": "" |
| }, |
| { |
| "first": "Eytan", |
| "middle": [], |
| "last": "Ruppin", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "ACM Transactions on Information Systems", |
| "volume": "20", |
| "issue": "1", |
| "pages": "116--131", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lev Finkelstein, Evgeniy Gabrilovich, Yossi Matias, Ehud Rivlin, Zach Solan, Gadi Wolfman, and Eytan Ruppin. 2002. Placing search in context: The concept revisited. ACM Transactions on Information Systems, 20(1):116- 131.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Clustering by passing messages between data points. science", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Brendan", |
| "suffix": "" |
| }, |
| { |
| "first": "Delbert", |
| "middle": [], |
| "last": "Frey", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Dueck", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "", |
| "volume": "315", |
| "issue": "", |
| "pages": "972--976", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Brendan J Frey and Delbert Dueck. 2007. Clustering by passing messages between data points. science, 315(5814):972-976.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Learning semantic hierarchies via word embeddings", |
| "authors": [ |
| { |
| "first": "Ruiji", |
| "middle": [], |
| "last": "Fu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiang", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| }, |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "Qin", |
| "suffix": "" |
| }, |
| { |
| "first": "Wanxiang", |
| "middle": [], |
| "last": "Che", |
| "suffix": "" |
| }, |
| { |
| "first": "Haifeng", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Ting", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 52th Annual Meeting of the Association for Computational Linguistics: Long Papers", |
| "volume": "1", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ruiji Fu, Jiang Guo, Bing Qin, Wanxiang Che, Haifeng Wang, and Ting Liu. 2014. Learning semantic hierar- chies via word embeddings. In Proceedings of the 52th Annual Meeting of the Association for Computational Linguistics: Long Papers-Volume 1, Baltimore MD, USA.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Using bilingual materials to develop word sense disambiguation methods", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "William", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Gale", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Kenneth", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Church", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Yarowsky", |
| "suffix": "" |
| } |
| ], |
| "year": 1992, |
| "venue": "Proceedings of the 4th International Conference on Theoretical and Methodological Issues in Machine Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "101--112", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "William A Gale, Kenneth W Church, and David Yarowsky. 1992. Using bilingual materials to develop word sense disambiguation methods. In Proceedings of the 4th International Conference on Theoretical and Methodologi- cal Issues in Machine Translation, pages 101-112.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Improving word representations via global context and multiple word prototypes", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Eric", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Christopher", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew Y", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics: Long Papers", |
| "volume": "1", |
| "issue": "", |
| "pages": "873--882", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eric H Huang, Richard Socher, Christopher D Manning, and Andrew Y Ng. 2012. Improving word representations via global context and multiple word prototypes. In Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics: Long Papers-Volume 1, pages 873-882.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Semeval-2012 task 4: evaluating chinese word similarity", |
| "authors": [ |
| { |
| "first": "Peng", |
| "middle": [], |
| "last": "Jin", |
| "suffix": "" |
| }, |
| { |
| "first": "Yunfang", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the First Joint Conference on Lexical and Computational Semantics", |
| "volume": "1", |
| "issue": "", |
| "pages": "374--377", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peng Jin and Yunfang Wu. 2012. Semeval-2012 task 4: evaluating chinese word similarity. In Proceedings of the First Joint Conference on Lexical and Computational Semantics-Volume 1: Proceedings of the main conference and the shared task, and Volume 2: Proceedings of the Sixth International Workshop on Semantic Evaluation, pages 374-377.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Separating disambiguation from composition in distributional semantics", |
| "authors": [ |
| { |
| "first": "Dimitri", |
| "middle": [], |
| "last": "Kartsaklis", |
| "suffix": "" |
| }, |
| { |
| "first": "Mehrnoosh", |
| "middle": [], |
| "last": "Sadrzadeh", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Pulman", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "114--123", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dimitri Kartsaklis, Mehrnoosh Sadrzadeh, and Stephen Pulman. 2013. Separating disambiguation from composition in distributional semantics. CoNLL-2013, pages 114-123.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Inducing crosslingual distributed representations of words", |
| "authors": [ |
| { |
| "first": "Alexandre", |
| "middle": [], |
| "last": "Klementiev", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Titov", |
| "suffix": "" |
| }, |
| { |
| "first": "Binod", |
| "middle": [], |
| "last": "Bhattarai", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of COLING 2012", |
| "volume": "", |
| "issue": "", |
| "pages": "1459--1474", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexandre Klementiev, Ivan Titov, and Binod Bhattarai. 2012. Inducing crosslingual distributed representations of words. In Proceedings of COLING 2012, pages 1459-1474, Mumbai, India, December.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Alignment by agreement", |
| "authors": [ |
| { |
| "first": "Percy", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| }, |
| { |
| "first": "Ben", |
| "middle": [], |
| "last": "Taskar", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the main conference on Human Language Technology Conference of the North American Chapter of the Association of Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "104--111", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Percy Liang, Ben Taskar, and Dan Klein. 2006. Alignment by agreement. In Proceedings of the main conference on Human Language Technology Conference of the North American Chapter of the Association of Computational Linguistics, pages 104-111.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Recurrent neural network based language model", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Karafi\u00e1t", |
| "suffix": "" |
| }, |
| { |
| "first": "Luk\u00e1\u0161", |
| "middle": [], |
| "last": "Burget", |
| "suffix": "" |
| }, |
| { |
| "first": "Jan", |
| "middle": [], |
| "last": "Cernocky", |
| "suffix": "" |
| }, |
| { |
| "first": "Sanjeev", |
| "middle": [], |
| "last": "Khudanpur", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of Interspeech", |
| "volume": "", |
| "issue": "", |
| "pages": "1045--1048", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Martin Karafi\u00e1t, Luk\u00e1\u0161 Burget, Jan Cernocky, and Sanjeev Khudanpur. 2010. Recurrent neural network based language model. In Proceedings of Interspeech, pages 1045-1048.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Rnnlm-recurrent neural network language modeling toolkit", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Stefan", |
| "middle": [], |
| "last": "Kombrink", |
| "suffix": "" |
| }, |
| { |
| "first": "Anoop", |
| "middle": [], |
| "last": "Deoras", |
| "suffix": "" |
| }, |
| { |
| "first": "Lukar", |
| "middle": [], |
| "last": "Burget", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u010dernock\u1ef3", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proc. of the 2011 ASRU Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "196--201", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Stefan Kombrink, Anoop Deoras, Lukar Burget, and J\u010cernock\u1ef3. 2011. Rnnlm-recurrent neural network language modeling toolkit. In Proc. of the 2011 ASRU Workshop, pages 196-201.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Efficient estimation of word representations in vector space", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1301.3781" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. 2013a. Efficient estimation of word representations in vector space. arXiv preprint arXiv:1301.3781.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Linguistic regularities in continuous space word representations", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Yih", |
| "middle": [], |
| "last": "Wen-Tau", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoffrey", |
| "middle": [], |
| "last": "Zweig", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of NAACL-HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "746--751", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Wen-tau Yih, and Geoffrey Zweig. 2013b. Linguistic regularities in continuous space word representations. In Proceedings of NAACL-HLT, pages 746-751.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Statistical Language Models Based on Neural Networks", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov. 2012. Statistical Language Models Based on Neural Networks. Ph.D. thesis, Ph. D. thesis, Brno University of Technology.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Contextual correlates of semantic similarity", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "George", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Miller", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Walter", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Charles", |
| "suffix": "" |
| } |
| ], |
| "year": 1991, |
| "venue": "Language and cognitive processes", |
| "volume": "6", |
| "issue": "1", |
| "pages": "1--28", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "George A Miller and Walter G Charles. 1991. Contextual correlates of semantic similarity. Language and cognitive processes, 6(1):1-28.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Three new graphical models for statistical language modelling", |
| "authors": [ |
| { |
| "first": "Andriy", |
| "middle": [], |
| "last": "Mnih", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoffrey", |
| "middle": [], |
| "last": "Hinton", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the 24th international conference on Machine learning", |
| "volume": "", |
| "issue": "", |
| "pages": "641--648", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andriy Mnih and Geoffrey Hinton. 2007. Three new graphical models for statistical language modelling. In Proceedings of the 24th international conference on Machine learning, pages 641-648.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "A scalable hierarchical distributed language model", |
| "authors": [ |
| { |
| "first": "Andriy", |
| "middle": [], |
| "last": "Mnih", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoffrey", |
| "middle": [ |
| "E" |
| ], |
| "last": "Hinton", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "1081--1088", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andriy Mnih and Geoffrey E Hinton. 2008. A scalable hierarchical distributed language model. In Advances in neural information processing systems, pages 1081-1088.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "A systematic comparison of various statistical alignment models", |
| "authors": [ |
| { |
| "first": "Josef", |
| "middle": [], |
| "last": "Franz", |
| "suffix": "" |
| }, |
| { |
| "first": "Hermann", |
| "middle": [], |
| "last": "Och", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ney", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Computational linguistics", |
| "volume": "29", |
| "issue": "1", |
| "pages": "19--51", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Franz Josef Och and Hermann Ney. 2003. A systematic comparison of various statistical alignment models. Computational linguistics, 29(1):19-51.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Crfsuite: a fast implementation of conditional random fields (crfs)", |
| "authors": [ |
| { |
| "first": "Naoaki", |
| "middle": [], |
| "last": "Okazaki", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Naoaki Okazaki. 2007. Crfsuite: a fast implementation of conditional random fields (crfs). URL http://www. chokkan. org/software/crfsuite.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Scikit-learn: Machine learning in Python", |
| "authors": [ |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Pedregosa", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Varoquaux", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Gramfort", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Michel", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Thirion", |
| "suffix": "" |
| }, |
| { |
| "first": "O", |
| "middle": [], |
| "last": "Grisel", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Blondel", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Prettenhofer", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Weiss", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Dubourg", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Vanderplas", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Passos", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Cournapeau", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Brucher", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Perrot", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Duchesnay", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "12", |
| "issue": "", |
| "pages": "2825--2830", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "F. Pedregosa, G. Varoquaux, A. Gramfort, V. Michel, B. Thirion, O. Grisel, M. Blondel, P. Prettenhofer, R. Weiss, V. Dubourg, J. Vanderplas, A. Passos, D. Cournapeau, M. Brucher, M. Perrot, and E. Duchesnay. 2011. Scikit-learn: Machine learning in Python. Journal of Machine Learning Research, 12:2825-2830.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Multi-prototype vector-space models of word meaning", |
| "authors": [ |
| { |
| "first": "Joseph", |
| "middle": [], |
| "last": "Reisinger", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Raymond", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mooney", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Human Language Technologies: The 2010 Annual Conference of the North American Chapter of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "109--117", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joseph Reisinger and Raymond J Mooney. 2010. Multi-prototype vector-space models of word meaning. In Human Language Technologies: The 2010 Annual Conference of the North American Chapter of the Association for Computational Linguistics, pages 109-117.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Contextual correlates of synonymy", |
| "authors": [ |
| { |
| "first": "Herbert", |
| "middle": [], |
| "last": "Rubenstein", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "John", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Goodenough", |
| "suffix": "" |
| } |
| ], |
| "year": 1965, |
| "venue": "Communications of the ACM", |
| "volume": "8", |
| "issue": "10", |
| "pages": "627--633", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Herbert Rubenstein and John B Goodenough. 1965. Contextual correlates of synonymy. Communications of the ACM, 8(10):627-633.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Automatic word sense discrimination", |
| "authors": [ |
| { |
| "first": "Hinrich", |
| "middle": [], |
| "last": "Sch\u00fctze", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "Computational linguistics", |
| "volume": "24", |
| "issue": "1", |
| "pages": "97--123", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hinrich Sch\u00fctze. 1998. Automatic word sense discrimination. Computational linguistics, 24(1):97-123.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Connectionist language modeling for large vocabulary continuous speech recognition", |
| "authors": [ |
| { |
| "first": "Holger", |
| "middle": [], |
| "last": "Schwenk", |
| "suffix": "" |
| }, |
| { |
| "first": "Jean-Luc", |
| "middle": [], |
| "last": "Gauvain", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Acoustics, Speech, and Signal Processing", |
| "volume": "1", |
| "issue": "", |
| "pages": "765--768", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Holger Schwenk and Jean-Luc Gauvain. 2002. Connectionist language modeling for large vocabulary continuous speech recognition. In Acoustics, Speech, and Signal Processing (ICASSP), 2002 IEEE International Conference on, volume 1, pages 765-768.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Dynamic pooling and unfolding recursive autoencoders for paraphrase detection", |
| "authors": [ |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Eric", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Pennin", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Christopher", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "801--809", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Richard Socher, Eric H Huang, Jeffrey Pennin, Christopher D Manning, and Andrew Ng. 2011. Dynamic pooling and unfolding recursive autoencoders for paraphrase detection. In Advances in Neural Information Processing Systems, pages 801-809.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Word representations: a simple and general method for semi-supervised learning", |
| "authors": [ |
| { |
| "first": "Joseph", |
| "middle": [], |
| "last": "Turian", |
| "suffix": "" |
| }, |
| { |
| "first": "Lev", |
| "middle": [], |
| "last": "Ratinov", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 48th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "384--394", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joseph Turian, Lev Ratinov, and Yoshua Bengio. 2010. Word representations: a simple and general method for semi-supervised learning. In Proceedings of the 48th Annual Meeting of the Association for Computational Linguistics, pages 384-394.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Compound embedding features for semisupervised learning", |
| "authors": [ |
| { |
| "first": "Mo", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Tiejun", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Daxiang", |
| "middle": [], |
| "last": "Dong", |
| "suffix": "" |
| }, |
| { |
| "first": "Hao", |
| "middle": [], |
| "last": "Tian", |
| "suffix": "" |
| }, |
| { |
| "first": "Dianhai", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of NAACL-HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "563--568", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mo Yu, Tiejun Zhao, Daxiang Dong, Hao Tian, and Dianhai Yu. 2013. Compound embedding features for semi-supervised learning. In Proceedings of NAACL-HLT, pages 563-568.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Bilingual word embeddings for phrase-based machine translation", |
| "authors": [ |
| { |
| "first": "Will", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Zou", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Cer", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1393--1398", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Will Y. Zou, Richard Socher, Daniel Cer, and Christopher D. Manning. 2013. Bilingual word embeddings for phrase-based machine translation. In Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing, pages 1393-1398, Seattle, Washington, USA, October.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "type_str": "figure", |
| "num": null, |
| "text": "An illustration of the proposed method. SL stands for source language." |
| }, |
| "FIGREF1": { |
| "uris": null, |
| "type_str": "figure", |
| "num": null, |
| "text": "The basic architecture of RNNLM." |
| }, |
| "FIGREF2": { |
| "uris": null, |
| "type_str": "figure", |
| "num": null, |
| "text": "(2011) used word embeddings as the input of various NLP tasks, including part-of-speech tagging, chunking, NER, and semantic role labeling. Turian et al. (2010) made a comprehensive comparison of various types of word embeddings as features for NER and chunking. In addition, word embeddings" |
| }, |
| "TABREF0": { |
| "content": "<table><tr><td/><td/><td colspan=\"2\">SL word</td><td>\u5236\u670d</td></tr><tr><td/><td/><td/><td/><td>subdue</td></tr><tr><td>1</td><td/><td/><td/><td>uniform</td></tr><tr><td>2</td><td>E: The workers wearing the factory 's uniform \u2026 C: \u8eab\u7a7f \u8be5 \u5382 \u5236\u670d \u7684 \u5de5\u4eba \u2026</td><td colspan=\"2\">Translations</td><td>overpower subjugate vestment</td></tr><tr><td/><td>E: She overpowered the burglars .</td><td/><td/><td>\u2026\u2026</td></tr><tr><td>3</td><td>C: \u5979 \u5236\u670d \u4e86 \u7a83\u8d3c \u3002</td><td/><td/></tr><tr><td/><td>E: They wore their priestly vestment in Church .</td><td/><td/></tr><tr><td>4</td><td>C: \u5728 \u6559\u5802 \uff0c \u4ed6\u4eec \u8eab\u7a7f \u7267\u5e08 \u7684 \u5236\u670d \u3002</td><td/><td/></tr><tr><td>5</td><td>\u2026\u2026</td><td>\u5236\u670d#1 (clothes)</td><td/><td>uniform vestment</td></tr><tr><td/><td/><td colspan=\"3\">overpower subdue subjugate</td><td>\u5236\u670d#2 (defeat)</td></tr><tr><td/><td/><td colspan=\"3\">Sense-specific</td></tr><tr><td/><td/><td colspan=\"3\">word embeddings</td></tr><tr><td/><td/><td>\u5236\u670d #1</td><td colspan=\"2\">< v1 #1 , v2 #1 , ..., vN #1 ></td></tr><tr><td/><td/><td>\u5236\u670d #2</td><td colspan=\"2\">< v1 #2 , v2 #2 , ..., vN #2 ></td></tr></table>", |
| "num": null, |
| "type_str": "table", |
| "text": ": \u2026 subdue , conquer or control the opponent . C: \u2026 \u5236\u670d , \u5f81\u670d \u6216 \u63a7\u5236 \u5bf9\u624b \u3002", |
| "html": null |
| }, |
| "TABREF1": { |
| "content": "<table><tr><td colspan=\"2\">SL Word Translation Words</td><td>Translation Word Clusters</td><td>Nearest Neighbours</td></tr><tr><td>\u5236\u670d</td><td>subdue, subjugate, uniform investment, overpower,</td><td>investment, uniform</td><td/></tr><tr><td>\u6cd5</td><td>act, code, France,</td><td>France, French</td><td>\u5fb7</td></tr><tr><td/><td>French, law, method</td><td/><td/></tr></table>", |
| "num": null, |
| "type_str": "table", |
| "text": "\u7a7f\u7740 dress , \u8b66\u670d policeman unif orm subdue, subjugate, overpower \u6253\u8d25 def eat , \u51fb\u8d25 beat , \u5f81\u670d conquer \u82b1 blossom, cost, flower, spend, take, took flower, blossom \u83dc greens , \u53f6 leaf , \u679c\u5b9e f ruit take, cost, spend \u82b1\u8d39 cost , \u8282\u7701 save , \u5269\u4e0b rest Germany , \u4fc4 Russia , \u82f1 Britain law, act, code \u6cd5\u4ee4 ordinance , \u6cd5\u6848 bill , \u6cd5\u89c4 rule method \u6982\u5ff5 concept , \u65b9\u6848 scheme , \u529e\u6cd5 way \u9886\u5bfc lead, leader, leadership leader, leadership \u4e3b\u7ba1 chief , \u4e0a\u53f8 boss , \u4e3b\u5e2d chairman lead \u76d1\u7763 supervise , \u51b3\u7b56 decision , \u5de5\u4f5c work", |
| "html": null |
| }, |
| "TABREF4": { |
| "content": "<table/>", |
| "num": null, |
| "type_str": "table", |
| "text": "Spearman's \u03c1 correlation and Kendall's \u03c4 correlation evaluated on the polysemous dataset.", |
| "html": null |
| }, |
| "TABREF6": { |
| "content": "<table/>", |
| "num": null, |
| "type_str": "table", |
| "text": "Performance of NER on test data.", |
| "html": null |
| } |
| } |
| } |
| } |