| { |
| "paper_id": "D14-1040", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:53:43.737790Z" |
| }, |
| "title": "Probabilistic Models of Cross-Lingual Semantic Similarity in Context Based on Latent Cross-Lingual Concepts Induced from Comparable Data", |
| "authors": [ |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Marie-Francine", |
| "middle": [], |
| "last": "Moens", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "ivan.vulic|marie-francine.moens@cs.kuleuven.be" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "We propose the first probabilistic approach to modeling cross-lingual semantic similarity (CLSS) in context which requires only comparable data. The approach relies on an idea of projecting words and sets of words into a shared latent semantic space spanned by language-pair independent latent semantic concepts (e.g., crosslingual topics obtained by a multilingual topic model). These latent cross-lingual concepts are induced from a comparable corpus without any additional lexical resources. Word meaning is represented as a probability distribution over the latent concepts, and a change in meaning is represented as a change in the distribution over these latent concepts. We present new models that modulate the isolated out-ofcontext word representations with contextual knowledge. Results on the task of suggesting word translations in context for 3 language pairs reveal the utility of the proposed contextualized models of crosslingual semantic similarity.", |
| "pdf_parse": { |
| "paper_id": "D14-1040", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "We propose the first probabilistic approach to modeling cross-lingual semantic similarity (CLSS) in context which requires only comparable data. The approach relies on an idea of projecting words and sets of words into a shared latent semantic space spanned by language-pair independent latent semantic concepts (e.g., crosslingual topics obtained by a multilingual topic model). These latent cross-lingual concepts are induced from a comparable corpus without any additional lexical resources. Word meaning is represented as a probability distribution over the latent concepts, and a change in meaning is represented as a change in the distribution over these latent concepts. We present new models that modulate the isolated out-ofcontext word representations with contextual knowledge. Results on the task of suggesting word translations in context for 3 language pairs reveal the utility of the proposed contextualized models of crosslingual semantic similarity.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Cross-lingual semantic similarity (CLSS) is a metric that measures to which extent words (or more generally, text units) describe similar semantic concepts and convey similar meanings across languages. Models of cross-lingual similarity are typically used to automatically induce bilingual lexicons and have found numerous applications in information retrieval (IR), statistical machine translation (SMT) and other natural language processing (NLP) tasks. Within the IR framework, the output of the CLSS models is a key resource in the models of dictionary-based cross-lingual information retrieval (Ballesteros and Croft, 1997; Lavrenko et al., 2002; Levow et al., 2005; Wang and Oard, 2006) or may be utilized in query expansion in cross-lingual IR models (Adriani and van Rijsbergen, 1999; . These CLSS models may also be utilized as an additional source of knowledge in SMT systems (Och and Ney, 2003; Wu et al., 2008) . Additionally, the models are a crucial component in the crosslingual tasks involving a sort of cross-lingual knowledge transfer, where the knowledge about utterances in one language may be transferred to another. The utility of the transfer or annotation projection by means of bilingual lexicons obtained from the CLSS models has already been proven in various tasks such as semantic role labeling (Pad\u00f3 and Lapata, 2009; van der Plas et al., 2011) , parsing (Zhao et al., 2009; Durrett et al., 2012; T\u00e4ckstr\u00f6m et al., 2013b) , POS tagging (Yarowsky and Ngai, 2001; Das and Petrov, 2011; T\u00e4ckstr\u00f6m et al., 2013a; Ganchev and Das, 2013) , verb classification (Merlo et al., 2002) , inducing selectional preferences (Peirsman and Pad\u00f3, 2010) , named entity recognition (Kim et al., 2012) , named entity segmentation (Ganchev and Das, 2013) , etc.", |
| "cite_spans": [ |
| { |
| "start": 599, |
| "end": 628, |
| "text": "(Ballesteros and Croft, 1997;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 629, |
| "end": 651, |
| "text": "Lavrenko et al., 2002;", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 652, |
| "end": 671, |
| "text": "Levow et al., 2005;", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 672, |
| "end": 692, |
| "text": "Wang and Oard, 2006)", |
| "ref_id": "BIBREF63" |
| }, |
| { |
| "start": 758, |
| "end": 792, |
| "text": "(Adriani and van Rijsbergen, 1999;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 886, |
| "end": 905, |
| "text": "(Och and Ney, 2003;", |
| "ref_id": "BIBREF42" |
| }, |
| { |
| "start": 906, |
| "end": 922, |
| "text": "Wu et al., 2008)", |
| "ref_id": "BIBREF64" |
| }, |
| { |
| "start": 1324, |
| "end": 1347, |
| "text": "(Pad\u00f3 and Lapata, 2009;", |
| "ref_id": "BIBREF44" |
| }, |
| { |
| "start": 1348, |
| "end": 1374, |
| "text": "van der Plas et al., 2011)", |
| "ref_id": "BIBREF59" |
| }, |
| { |
| "start": 1385, |
| "end": 1404, |
| "text": "(Zhao et al., 2009;", |
| "ref_id": "BIBREF67" |
| }, |
| { |
| "start": 1405, |
| "end": 1426, |
| "text": "Durrett et al., 2012;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 1427, |
| "end": 1451, |
| "text": "T\u00e4ckstr\u00f6m et al., 2013b)", |
| "ref_id": "BIBREF56" |
| }, |
| { |
| "start": 1466, |
| "end": 1491, |
| "text": "(Yarowsky and Ngai, 2001;", |
| "ref_id": "BIBREF65" |
| }, |
| { |
| "start": 1492, |
| "end": 1513, |
| "text": "Das and Petrov, 2011;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 1514, |
| "end": 1538, |
| "text": "T\u00e4ckstr\u00f6m et al., 2013a;", |
| "ref_id": "BIBREF55" |
| }, |
| { |
| "start": 1539, |
| "end": 1561, |
| "text": "Ganchev and Das, 2013)", |
| "ref_id": null |
| }, |
| { |
| "start": 1584, |
| "end": 1604, |
| "text": "(Merlo et al., 2002)", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 1640, |
| "end": 1665, |
| "text": "(Peirsman and Pad\u00f3, 2010)", |
| "ref_id": "BIBREF45" |
| }, |
| { |
| "start": 1693, |
| "end": 1711, |
| "text": "(Kim et al., 2012)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 1753, |
| "end": 1763, |
| "text": "Das, 2013)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The models of cross-lingual semantic similarity from parallel corpora rely on word alignment models (Brown et al., 1993; Och and Ney, 2003) , but due to a relative scarceness of parallel texts for many language pairs and domains, the models of cross-lingual similarity from comparable corpora have gained much attention recently.", |
| "cite_spans": [ |
| { |
| "start": 100, |
| "end": 120, |
| "text": "(Brown et al., 1993;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 121, |
| "end": 139, |
| "text": "Och and Ney, 2003)", |
| "ref_id": "BIBREF42" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "All these models from parallel and comparable corpora provide ranked lists of semantically similar words in the target language in isolation or invariably, that is, they do not explicitly iden-tify and encode different senses of words. In practice, it means that, given the sentence \"The coach of his team was not satisfied with the game yesterday.\", these context-insensitive models of similarity are not able to detect that the Spanish word entrenador is more similar to the polysemous word coach in the context of this sentence than the Spanish word autocar, although autocar is listed as the most semantically similar word to coach globally/invariably without any observed context. In another example, while Spanish words partido, encuentro, cerilla or correspondencia are all highly similar to the ambiguous English word match when observed in isolation, given the Spanish sentence \"She was unable to find a match in her pocket to light up a cigarette.\", it is clear that the strength of semantic similarity should change in context as only cerilla exhibits a strong semantic similarity to match within this particular sentential context.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Following this intuition, in this paper we investigate models of cross-lingual semantic similarity in context. The context-sensitive models of similarity target to re-rank the lists of semantically similar words based on the co-occurring contexts of words. Unlike prior work (e.g., Prior et al., 2011; Apidianaki, 2011 )), we explore these models in a particularly difficult and minimalist setting that builds only on co-occurrence counts and latent cross-lingual semantic concepts induced directly from comparable corpora, and which does not rely on any other resource (e.g., machine-readable dictionaries, parallel corpora, explicit ontology and category knowledge). In that respect, the work reported in this paper extends the current research on purely statistical data-driven distributional models of cross-lingual semantic similarity that are built upon the idea of latent cross-lingual concepts (Haghighi et al., 2008; Daum\u00e9 III and Jagarlamudi, 2011; Vuli\u0107 et al., 2011; induced from non-parallel data. While all the previous models in this framework are context-insensitive models of semantic similarity, we demonstrate how to build context-aware models of semantic similarity within the same probabilistic framework which relies on the same shared set of latent concepts.", |
| "cite_spans": [ |
| { |
| "start": 282, |
| "end": 301, |
| "text": "Prior et al., 2011;", |
| "ref_id": "BIBREF47" |
| }, |
| { |
| "start": 302, |
| "end": 318, |
| "text": "Apidianaki, 2011", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 902, |
| "end": 925, |
| "text": "(Haghighi et al., 2008;", |
| "ref_id": null |
| }, |
| { |
| "start": 926, |
| "end": 958, |
| "text": "Daum\u00e9 III and Jagarlamudi, 2011;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 959, |
| "end": 978, |
| "text": "Vuli\u0107 et al., 2011;", |
| "ref_id": "BIBREF61" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The main contributions of this paper are:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We present a new probabilistic approach to modeling cross-lingual semantic similarity in context based on latent cross-lingual seman-tic concepts induced from non-parallel data. \u2022 We show how to use the models of crosslingual semantic similarity in the task of suggesting word translations in context. \u2022 We provide results for three language pairs which demonstrate that contextualized models of similarity significantly outscore context-insensitive models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Latent Cross-Lingual Concepts. Latent crosslingual concepts/senses may be interpreted as language-independent semantic concepts present in a multilingual corpus (e.g., document-aligned Wikipedia articles in English, Spanish and Dutch) that have their language-specific representations in different languages. For instance, having a multilingual collection in English, Spanish and Dutch, and then discovering a latent semantic concept on Soccer, that concept would be represented by words (actually probabilities over words P (w|z k ), where w denotes a word, and z k denotes k-th latent concept): {player, goal, coach, . . . } in English, bal\u00f3n (ball), futbolista (soccer player), equipo (team), . . . } in Spanish, and {wedstrijd (match), elftal (soccer team), doelpunt (goal), . . . } in Dutch. Given a multilingual corpus C, the goal is to learn and extract a set Z of K latent crosslingual concepts {z 1 , . . . , z K } that optimally describe the observed data, that is, the multilingual corpus C. Extracting cross-lingual concepts actually implies learning per-document concept distributions for each document in the corpus, and discovering language-specific representations of these concepts given by per-concept word distributions in each language. Z = {z 1 , . . . , z K } represents the set of K latent cross-lingual concepts present in the multilingual corpus. These K semantic concepts actually span a latent cross-lingual semantic space. Each word w, irrespective of its actual language, may be represented in that latent semantic space as a K-dimensional vector, where each vector component is a conditional concept score P (z k |w).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Towards Cross-Lingual Semantic Similarity in Context", |
| "sec_num": "2" |
| }, |
| { |
| "text": "A number of models may be employed to induce the latent concepts. For instance, one could use cross-lingual Latent Semantic Indexing (Dumais et al., 1996) , probabilistic Principal Component Analysis (Tipping and Bishop, 1999) , or a probabilistic interpretation of non-negative matrix factorization (Lee and Seung, 1999; Gaussier and Goutte, 2005; Ding et al., 2008) on concatenated documents in aligned document pairs. Other more recent models include matching canonical correlation analysis (Haghighi et al., 2008; Daum\u00e9 III and Jagarlamudi, 2011) and multilingual probabilistic topic models (Ni et al., 2009; De Smet and Moens, 2009; Mimno et al., 2009; Boyd-Graber and Blei, 2009; Zhang et al., 2010; Fukumasu et al., 2012) .", |
| "cite_spans": [ |
| { |
| "start": 133, |
| "end": 154, |
| "text": "(Dumais et al., 1996)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 200, |
| "end": 226, |
| "text": "(Tipping and Bishop, 1999)", |
| "ref_id": "BIBREF58" |
| }, |
| { |
| "start": 300, |
| "end": 321, |
| "text": "(Lee and Seung, 1999;", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 322, |
| "end": 348, |
| "text": "Gaussier and Goutte, 2005;", |
| "ref_id": null |
| }, |
| { |
| "start": 349, |
| "end": 367, |
| "text": "Ding et al., 2008)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 494, |
| "end": 517, |
| "text": "(Haghighi et al., 2008;", |
| "ref_id": null |
| }, |
| { |
| "start": 518, |
| "end": 550, |
| "text": "Daum\u00e9 III and Jagarlamudi, 2011)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 595, |
| "end": 612, |
| "text": "(Ni et al., 2009;", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 613, |
| "end": 637, |
| "text": "De Smet and Moens, 2009;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 638, |
| "end": 657, |
| "text": "Mimno et al., 2009;", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 658, |
| "end": 685, |
| "text": "Boyd-Graber and Blei, 2009;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 686, |
| "end": 705, |
| "text": "Zhang et al., 2010;", |
| "ref_id": "BIBREF66" |
| }, |
| { |
| "start": 706, |
| "end": 728, |
| "text": "Fukumasu et al., 2012)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Towards Cross-Lingual Semantic Similarity in Context", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Due to its inherent language pair independent nature and state-of-the-art performance in the tasks such as bilingual lexicon extraction (Vuli\u0107 et al., 2011) and cross-lingual information retrieval , the description in this paper relies on the multilingual probabilistic topic modeling (MuPTM) framework. We draw a direct parallel between latent cross-lingual concepts and latent cross-lingual topics, and we present the framework from the MuPTM perspective, but the proposed framework is generic and allows the usage of all other models that are able to compute probability scores P (z k |w). These scores in MuPTM are induced from their output languagespecific per-topic word distributions. The multilingual probabilistic topic models output probability scores P (w S i |z k ) and", |
| "cite_spans": [ |
| { |
| "start": 136, |
| "end": 156, |
| "text": "(Vuli\u0107 et al., 2011)", |
| "ref_id": "BIBREF61" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Towards Cross-Lingual Semantic Similarity in Context", |
| "sec_num": "2" |
| }, |
| { |
| "text": "P (w T j |z k ) for each w S i \u2208 V S and w T j \u2208 V T and each z k \u2208 Z, and it holds w S i \u2208V S P (w S i |z k ) = 1 and w T j \u2208V T P (w T j |z k ) = 1.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Towards Cross-Lingual Semantic Similarity in Context", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The scores are then used to compute scores P (z k |w S i ) and P (z k |w T j ) in order to represent words from the two different languages in the same latent semantic space in a uniform way. Context-Insensitive Models of Similarity. Without observing any context, the standard models of semantic word similarity that rely on the semantic space spanned by latent cross-lingual concepts in both monolingual (Dinu and Lapata, 2010a; Dinu and Lapata, 2010b) and multilingual settings (Vuli\u0107 et al., 2011) typically proceed in the following manner. Latent language-independent concepts (e.g., cross-lingual topics or latent word senses) are estimated on a large corpus. The K-dimensional vector representation of the word w S 1 \u2208 V S is:", |
| "cite_spans": [ |
| { |
| "start": 406, |
| "end": 430, |
| "text": "(Dinu and Lapata, 2010a;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 431, |
| "end": 454, |
| "text": "Dinu and Lapata, 2010b)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 481, |
| "end": 501, |
| "text": "(Vuli\u0107 et al., 2011)", |
| "ref_id": "BIBREF61" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Towards Cross-Lingual Semantic Similarity in Context", |
| "sec_num": "2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "vec(w S 1 ) = [P (z1|w S 1 ), . . . , P (zK |w S 1 )]", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Towards Cross-Lingual Semantic Similarity in Context", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Similarly, we are able to represent any target language word w T 2 in the same latent semantic space by a K-dimensional vector with scores P (z k |w T 2 ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Towards Cross-Lingual Semantic Similarity in Context", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Each word regardless of its language is represented as a distribution over K latent concepts. The similarity between w S 1 and some word w T 2 \u2208 V T is then computed as the similarity between their K-dimensional vector representations using some of the standard similarity measures (e.g., the Kullback-Leibler or the Jensen-Shannon divergence, the cosine measure). These methods use only global co-occurrence statistics from the training set and do not take into account any contextual information. They provide only out-of-context word representations and are therefore able to deliver only context-insensitive models of similarity.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Towards Cross-Lingual Semantic Similarity in Context", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Defining Context. Given an occurrence of a word w S 1 , we build its context set Con(w S 1 ) = {cw S 1 , . . . , cw S r } that comprises r words from V S that co-occur with w S 1 in a defined contextual scope or granularity. In this work we do not investigate the influence of the context scope (e.g., document-based, paragraph-based, window-based contexts). Following the recent work from Huang et al. (2012) in the monolingual setting, we limit the contextual scope to the sentential context. However, we emphasize that the proposed models are designed to be fully functional regardless of the actual chosen context granularity. e.g., when operating in the sentential context, Con(w S 1 ) consists of words occurring in the same sentence with the particular instance of w S 1 . Following Mitchell and Lapata (2008) , for the sake of simplicity, we impose the bag-of-words assumption, and do not take into account the order of words in the context set as well as context words' dependency relations to w S 1 . Investigating different context types (e.g., dependency-based) is a subject of future work. By using all words occurring with w S 1 in a context set (e.g., a sentence) to build the set Con(w S 1 ), we do not make any distinction between \"informative and \"uninformative\" context words. However, some context words bear more contextual information about the observed word w S 1 and are stronger indicators of the correct word meaning in that particular context. For instance, in the sentence \"The coach of his team was not satisfied with the game yesterday\", words game and team are strong clues that coach should be translated as entrenador while the context word yesterday does not bring any extra contextual information that could resolve the ambiguity.", |
| "cite_spans": [ |
| { |
| "start": 790, |
| "end": 816, |
| "text": "Mitchell and Lapata (2008)", |
| "ref_id": "BIBREF37" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Towards Cross-Lingual Semantic Similarity in Context", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Therefore, in the final context set Con(w S 1 ) it is useful to retain only the context words that re-ally bring extra semantic information. We achieve that by exploiting the same latent semantic space to provide the similarity score between the observed word w S 1 and each word cw S i , i = 1, . . . , r from its context set Con(w S 1 ). Each word cw S i may be represented by its vector vec(cw S i ) (see eq. (1)) in the same latent semantic space, and there we can compute the similarity between its vector and vec(w S 1 ). We can then sort the similarity scores for each cw S i and retain only the top scoring M context words in the final set Con(w S 1 ). The procedure of context sorting and pruning should improve the semantic cohesion between w S 1 and its context since only informative context features are now present in Con(w S 1 ), and we reduce the noise coming from uninformative contextual features that are not semantically related to w S 1 . Other options for the context sorting and pruning are possible, but the main goal in this paper is to illustrate the core utility of the procedure.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Towards Cross-Lingual Semantic Similarity in Context", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Representing Context. The probabilistic framework that is supported by latent cross-lingual concepts allows for having the K-dimensional vector representations in the same latent semantic space spanned by cross-lingual topics for: (1) Single words regardless of their actual language, and (2) Sets that comprise multiple words. Therefore, we are able to project the observed source word, all target words, and the context set of the observed source word to the same latent semantic space spanned by latent cross-lingual concepts.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cross-Lingual Semantic Similarity in Context via Latent Concepts", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Eq. (1) shows how to represent single words in the latent semantic space. Now, we present a way to address compositionality, that is, we show how to build the same representations in the same latent semantic space beyond the word level. We need to compute a conditional concept distribution for the context set Con(w S 1 ), that is, we have to compute the probability scores P (z k |Con(w S 1 )) for each z k \u2208 Z. Remember that the context Con(w S 1 ) is actually a set of r (or M after pruning) words Con(w S 1 ) = {cw S 1 , . . . , cw S r }. Under the singletopic assumption (Griffiths et al., 2007) and following Bayes' rule, it holds:", |
| "cite_spans": [ |
| { |
| "start": 577, |
| "end": 601, |
| "text": "(Griffiths et al., 2007)", |
| "ref_id": "BIBREF54" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cross-Lingual Semantic Similarity in Context via Latent Concepts", |
| "sec_num": "3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P (z k |Con(w S 1 )) = P (Con(w S 1 )|z k )P (z k ) P (Con(w S 1 )) = P (cw S 1 , . . . , cw S r |z k )P (z k ) K l=1 P (cw S 1 , . . . , cw S r |z l )P (z l ) (2) = r j=1 P (cw S j |z k )P (z k ) K l=1 r j=1 P (cw S j |z l )P (z l )", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Cross-Lingual Semantic Similarity in Context via Latent Concepts", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Note that here we use a simplification where we assume that all cw S j \u2208 Con(w S 1 ) are conditionally independent given z k . The assumption of the conditional independence of unigrams is a standard heuristic applied in bag-of-words model in NLP and IR (e.g., one may observe a direct analogy to probabilistic language models for IR where the assumption of independence of query words is imposed (Ponte and Croft, 1998; Hiemstra, 1998; Lavrenko and Croft, 2001) ), but we have to forewarn the reader that in general the equation P (cw S 1 , . . . , cw S r |z k ) = r j=1 P (cw S j |z k ) is not exact. However, by adopting the conditional independence assumption, in case of the uniform topic prior P (z k ) (i.e., we assume that we do not posses any prior knowledge about the importance of latent cross-lingual concepts in a multilingual corpus), eq. (3) may be further simplified:", |
| "cite_spans": [ |
| { |
| "start": 397, |
| "end": 420, |
| "text": "(Ponte and Croft, 1998;", |
| "ref_id": "BIBREF46" |
| }, |
| { |
| "start": 421, |
| "end": 436, |
| "text": "Hiemstra, 1998;", |
| "ref_id": null |
| }, |
| { |
| "start": 437, |
| "end": 462, |
| "text": "Lavrenko and Croft, 2001)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cross-Lingual Semantic Similarity in Context via Latent Concepts", |
| "sec_num": "3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P (z k |Con(w S 1 )) \u2248 r j=1 P (cw S j |z k ) K l=1 r j=1 P (cw S j |z l )", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "Cross-Lingual Semantic Similarity in Context via Latent Concepts", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The representation of the context set in the latent semantic space is then:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cross-Lingual Semantic Similarity in Context via Latent Concepts", |
| "sec_num": "3" |
| }, |
| { |
| "text": "vec(Con(w S 1 )) = [P (z1|Con(w S 1 )), . . . , P (zK |Con(w S 1 ))]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cross-Lingual Semantic Similarity in Context via Latent Concepts", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We can then compute the similarity between words and sets of words given in the same latent semantic space in a uniform way, irrespective of their actual language. We use all these properties when building our context-sensitive CLSS models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cross-Lingual Semantic Similarity in Context via Latent Concepts", |
| "sec_num": "3" |
| }, |
| { |
| "text": "One remark: As a by-product of our modeling approach, by this procedure for computing representations for sets of words, we have in fact paved the way towards compositional cross-lingual models of similarity which rely on latent cross-lingual concepts. Similar to compositional models in monolingual settings (Mitchell and Lapata, 2010; Rudolph and Giesbrecht, 2010; Baroni and Zamparelli, 2010; Socher et al., 2011; Grefenstette and Sadrzadeh, 2011; Blacoe and Lapata, 2012; Clarke, 2012; Socher et al., 2012) and multilingual settings (Hermann and Blunsom, 2014; Ko\u010disk\u00fd et al., 2014) , the representation of a set of words (e.g., a phrase or a sentence) is exactly the same as the representation of a single word; it is simply a K-dimensional real-valued vector. Our work on inducing structured representations of words and text units beyond words is similar to (Klementiev et al., 2012; Hermann and Blunsom, 2014; Ko\u010disk\u00fd et al., 2014) , but unlike them, we do not need high-quality sentence-aligned parallel data to induce bilingual text representations. Moreover, this work on compositionality in multilingual settings is only preliminary (e.g., we treat phrases and sentences as bags-of-words), and in future work we will aim to include syntactic information in the composition models as already done in monolingual settings (Socher et al., 2012; Hermann and Blunsom, 2013) . Intuition behind the Approach. Going back to our novel CLSS models in context, these models rely on the representations of words and their contexts in the same latent semantic space spanned by latent cross-lingual concepts/topics. The models differ in the way the contextual knowledge is fused with the out-of-context word representations.", |
| "cite_spans": [ |
| { |
| "start": 309, |
| "end": 336, |
| "text": "(Mitchell and Lapata, 2010;", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 337, |
| "end": 366, |
| "text": "Rudolph and Giesbrecht, 2010;", |
| "ref_id": "BIBREF50" |
| }, |
| { |
| "start": 367, |
| "end": 395, |
| "text": "Baroni and Zamparelli, 2010;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 396, |
| "end": 416, |
| "text": "Socher et al., 2011;", |
| "ref_id": "BIBREF52" |
| }, |
| { |
| "start": 417, |
| "end": 450, |
| "text": "Grefenstette and Sadrzadeh, 2011;", |
| "ref_id": null |
| }, |
| { |
| "start": 451, |
| "end": 475, |
| "text": "Blacoe and Lapata, 2012;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 476, |
| "end": 489, |
| "text": "Clarke, 2012;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 490, |
| "end": 510, |
| "text": "Socher et al., 2012)", |
| "ref_id": "BIBREF53" |
| }, |
| { |
| "start": 565, |
| "end": 586, |
| "text": "Ko\u010disk\u00fd et al., 2014)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 865, |
| "end": 890, |
| "text": "(Klementiev et al., 2012;", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 891, |
| "end": 917, |
| "text": "Hermann and Blunsom, 2014;", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 918, |
| "end": 939, |
| "text": "Ko\u010disk\u00fd et al., 2014)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 1332, |
| "end": 1353, |
| "text": "(Socher et al., 2012;", |
| "ref_id": "BIBREF53" |
| }, |
| { |
| "start": 1354, |
| "end": 1380, |
| "text": "Hermann and Blunsom, 2013)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cross-Lingual Semantic Similarity in Context via Latent Concepts", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The key idea behind these models is to represent a word w S 1 in the latent semantic space as a distribution over the latent cross-lingual concepts, but now with an additional modulation of the representation after taking its local context into account. The modulated word representation in the semantic space spanned by K latent cross-lingual concepts is then:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cross-Lingual Semantic Similarity in Context via Latent Concepts", |
| "sec_num": "3" |
| }, |
| { |
| "text": "vec(w S 1 , Con(w S 1 )) = [P (z1|w S 1 ), . . . , P (zK |w S 1 )] (5)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cross-Lingual Semantic Similarity in Context via Latent Concepts", |
| "sec_num": "3" |
| }, |
| { |
| "text": "where P (z K |w S 1 ) denotes the recalculated (or modulated) probability score for the conditional concept/topic distribution of w S 1 after observing its context Con(w S 1 ). For an illustration of the key idea, see fig. 1 . The intuition is that the context helps to disambiguate the true meaning of the occurrence of the word w S 1 . In other words, after observing the context of the word w S 1 , fewer latent cross-lingual concepts will share most of the probability mass in the modulated context-aware word representation. Model I: Direct-Fusion. The first approach makes the conditional distribution over latent semantic concepts directly dependent on both word w S 1 and its context Con(w S 1 ). The probability score P (z k |w S 1 ) from eq. (5) for each z k \u2208 Z is then given as P (z k |w S 1 ) = P (z k |w S 1 , Con(w S 1 )). We have to estimate the probability P (z k |w S 1 , Con(w S 1 )), that is, the probability that word w S 1 is assigned to the latent concept/topic z k given its context Con(w S 1 ):", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 218, |
| "end": 224, |
| "text": "fig. 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Cross-Lingual Semantic Similarity in Context via Latent Concepts", |
| "sec_num": "3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P (z k |w S 1 , Con(w S 1 )) = P (z k , w S 1 )P (Con(w S 1 )|z k ) K l=1 P (z l , w S 1 )P (Con(w S 1 )|z l )", |
| "eq_num": "(6)" |
| } |
| ], |
| "section": "Cross-Lingual Semantic Similarity in Context via Latent Concepts", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Since P (z k , w S 1 ) = P (w S 1 |z k )P (z k ), if we closely follow the derivation from eq. (3) which shows how to project context into the latent semantic space (and again assume the uniform topic prior P (z k )), we finally obtain the following formula:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cross-Lingual Semantic Similarity in Context via Latent Concepts", |
| "sec_num": "3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P (z k |w S 1 ) \u2248 P (w S 1 |z k ) r j=1 P (cw S j |z k ) K l=1 P (w S 1 |z l ) r j=1 P (cw S j |z l )", |
| "eq_num": "(7)" |
| } |
| ], |
| "section": "Cross-Lingual Semantic Similarity in Context via Latent Concepts", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The ranking of all words w T 2 \u2208 V T according to their similarity to w S 1 may be computed by detecting the similarity score between their representation in the K-dimensional latent semantic space and the modulated source word representation as given by eq. (5) and eq. (7) using any of the existing similarity functions (Lee, 1999; Cha, 2007) . The similarity score Sim(w S 1 , w T 2 , Con(w S 1 )) between some w T 2 \u2208 V T represented by its vector vec(w T", |
| "cite_spans": [ |
| { |
| "start": 322, |
| "end": 333, |
| "text": "(Lee, 1999;", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 334, |
| "end": 344, |
| "text": "Cha, 2007)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cross-Lingual Semantic Similarity in Context via Latent Concepts", |
| "sec_num": "3" |
| }, |
| { |
| "text": "2 ) and the observed word w S 1 given its context Con(w S 1 ) is computed as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cross-Lingual Semantic Similarity in Context via Latent Concepts", |
| "sec_num": "3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "sim(w S 1 , w T 2 , Con(w S 1 )) = SF vec w S 1 , Con(w S 1 ) , vec w T 2", |
| "eq_num": "(8)" |
| } |
| ], |
| "section": "Cross-Lingual Semantic Similarity in Context via Latent Concepts", |
| "sec_num": "3" |
| }, |
| { |
| "text": "where SF denotes a similarity function. Words are then ranked according to their respective similarity scores and the best scoring candidate may be selected as the best translation of an occurrence of the word w S 1 given its local context. Since the contextual knowledge is integrated directly into the estimation of probability P (z k |w S 1 , Con(w S 1 )), we name this context-aware CLSS model the Direct-Fusion model. Model II: Smoothed-Fusion. The next model follows the modeling paradigm established within the framework of language modeling (LM), where the idea is to \"back off\" to a lower order Ngram in case we do not possess any evidence about a higher-order N-gram (Jurafsky and Martin, 2000) . The idea now is to smooth the representation of a word in the latent semantic space induced only by the words in its local context with the out-of-context type-based representation of that word induced directly from a large training corpus. In other words, the modulated probability score P (z k |w S 1 ) from eq. (5) is calculated as:", |
| "cite_spans": [ |
| { |
| "start": 677, |
| "end": 704, |
| "text": "(Jurafsky and Martin, 2000)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cross-Lingual Semantic Similarity in Context via Latent Concepts", |
| "sec_num": "3" |
| }, |
| { |
| "text": "P (z k |w S 1 ) = \u03bb1P (z k |Con(w S 1 )) + (1 \u2212 \u03bb1)P (z k |w S 1 ) (9)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cross-Lingual Semantic Similarity in Context via Latent Concepts", |
| "sec_num": "3" |
| }, |
| { |
| "text": "where \u03bb 1 is the interpolation parameter, P (z k |w S 1 ) is the out-of-context conditional concept probability score as in eq. (1), and P (z k |Con(w S 1 )) is given by eq. (3). This model compromises between the pure contextual word representation and", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cross-Lingual Semantic Similarity in Context via Latent Concepts", |
| "sec_num": "3" |
| }, |
| { |
| "text": "z 3 z 2 z 1 coach (in isolation) entrenador autocar z 3 z 2 z 1 coach (contextualized) entrenador autocar", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cross-Lingual Semantic Similarity in Context via Latent Concepts", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The coach of his team was not satisfied with the game yesterday.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cross-Lingual Semantic Similarity in Context via Latent Concepts", |
| "sec_num": "3" |
| }, |
| { |
| "text": "K coach K coach", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cross-Lingual Semantic Similarity in Context via Latent Concepts", |
| "sec_num": "3" |
| }, |
| { |
| "text": "CONTEXT-SENSITIVE Figure 1 : An illustrative toy example of the main intuitions in our probabilistic framework for building context sensitive models with only three latent cross-lingual concepts (axes z 1 , z 2 and z 3 ): A change in meaning is reflected as a change in a probability distribution over latent cross-lingual concepts that span a shared latent semantic space. A change in the probability distribution may then actually steer an English word coach towards its correct (Spanish) meaning in context.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 18, |
| "end": 26, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "CONTEXT-INSENSITIVE", |
| "sec_num": null |
| }, |
| { |
| "text": "the out-of-context word representation. In cases when the local context of word w S 1 is informative enough, the factor P (z k |Con(w S 1 )) is sufficient to provide the ranking of terms in V T , that is, to detect words that are semantically similar to w S 1 based on its context. However, if the context is not reliable, we have to smooth the pure contextbased representation with the out-of-context word representation (the factor P (z k |w S 1 )). We call this model the Smoothed-Fusion model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "CONTEXT-INSENSITIVE", |
| "sec_num": null |
| }, |
| { |
| "text": "The ranking of words w T 2 \u2208 V T then finally proceeds in the same manner as in Direct-Fusion following eq. (8), but now using eq. (9) for the modulated probability scores P (z k |w S 1 ). Model III: Late-Fusion. The last model is conceptually similar to Smoothed-Fusion, but it performs smoothing at a later stage. It proceeds in two steps: (1) Given a target word w T 2 \u2208 V T , the model computes similarity scores separately between (i) the context set Con(w S 1 ) and w T 2 , and (ii) the word w S 1 in isolation and w T 2 (again, on the type level); (2) It linearly combines the obtained similarity scores. More formally, we may write:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "CONTEXT-INSENSITIVE", |
| "sec_num": null |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "Sim(w S 1 , w T 2 , Con(w S 1 )) = \u03bb2SF vec Con(w S 1 ) , vec w T 2 + (1 \u2212 \u03bb2)SF vec w S 1 , vec w T 2", |
| "eq_num": "(10)" |
| } |
| ], |
| "section": "CONTEXT-INSENSITIVE", |
| "sec_num": null |
| }, |
| { |
| "text": "where \u03bb 2 is the interpolation parameter. Since this model computes the similarity with each target word separately for the source word in isolation and its local context, and combines the ob-tained similarity scores after the computations, this model is called Late-Fusion.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "CONTEXT-INSENSITIVE", |
| "sec_num": null |
| }, |
| { |
| "text": "Evaluation Task: Suggesting Word Translations in Context. Given an occurrence of a polysemous word w S 1 \u2208 V S in the source language L S with vocabulary V S , the task is to choose the correct translation in the target language L T of that particular occurrence of w S 1 from the given set T = {t T 1 , . . . , t T q }, T \u2286 V T , of its q possible translations/meanings (i.e., its translation or sense inventory). The task of suggesting a word translation in context may be interpreted as ranking the q translations with respect to the observed local context Con(w S 1 ) of the occurrence of the word w S", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "4" |
| }, |
| { |
| "text": "1 . The best scoring translation candidate in the ranked list is then the suggested correct translation for that particular occurrence of w S 1 after observing its local context Con(w S 1 ). Training Data. We use the following corpora for inducing latent cross-lingual concepts/topics, i.e., for training our multilingual topic model: (i) a collection of 13, 696 Spanish-English Wikipedia article pairs (Wiki-ES-EN), (ii) a collection of 18, 898 Italian-English Wikipedia article pairs, (iii) a collection of 7, 612 Dutch-English Wikipedia article pairs (Wiki-NL-EN), and (iv) the Wiki-NL-EN corpus augmented with 6,206 Dutch-English document pairs from Europarl (Koehn, 2005 ) (Wiki+EP-NL-EN). The corpora were previously used in . No explicit use is made of sentence-level alignments in Europarl.", |
| "cite_spans": [ |
| { |
| "start": 663, |
| "end": 675, |
| "text": "(Koehn, 2005", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Correct Translation (EN) 1. I primi calci furono prodotti in legno ma recentemente... stock 2. In caso di osteoporosi si verifica un eccesso di rilascio di calcio dallo scheletro... calcium 3. La crescita del calcio femminile professionistico ha visto il lancio di competizioni... football 4. Il calcio di questa pistola (Beretta Modello 21a, calibro .25) ha le guancette in materiale... stock All corpora are theme-aligned comparable corpora, i.e, the aligned document pairs discuss similar themes, but are in general not direct translations (except for Europarl). By training on Wiki+EP-NL-EN we want to test how the training corpus of higher quality affects the estimation of latent cross-lingual concepts that span the shared latent semantic space and, consequently, the overall results in the task of suggesting word translations in context. Following prior work (Koehn and Knight, 2002; Haghighi et al., 2008; Prochasson and Fung, 2011; , we retain only nouns that occur at least 5 times in the corpus. We record lemmatized word forms when available, and original forms otherwise. We use TreeTagger (Schmid, 1994) for POS tagging and lemmatization.", |
| "cite_spans": [ |
| { |
| "start": 868, |
| "end": 892, |
| "text": "(Koehn and Knight, 2002;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 893, |
| "end": 915, |
| "text": "Haghighi et al., 2008;", |
| "ref_id": null |
| }, |
| { |
| "start": 916, |
| "end": 942, |
| "text": "Prochasson and Fung, 2011;", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 1105, |
| "end": 1119, |
| "text": "(Schmid, 1994)", |
| "ref_id": "BIBREF51" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence in Italian", |
| "sec_num": null |
| }, |
| { |
| "text": "Test Data. We have constructed test datasets in Spanish (ES), Italian (IT) and Dutch (NL), where the aim is to find their correct translation in English (EN) given the sentential context. We have selected 15 polysemous nouns (see tab. 2 for the list of nouns along with their possible translations) in each of the 3 languages, and have manually extracted 24 sentences (not present in the training data) for each noun that capture different meanings of the noun from Wikipedia. In order to construct datasets that are balanced across different possible translations of a noun, in case of q different translation candidates in T for some word w S 1 , the dataset contains exactly 24/q sentences for each translation from T . In total, we have designed 360 sentences for each language pair (ES/IT/NL-EN), 1080 sentences in total. 1 . We have used 5 extra nouns with 20 sentences each as a development set to tune the parameters of our models. As a by-product, we have built an initial repository of ES/IT/NL ambiguous words. Tab. 1 presents a small sample from the IT evaluation dataset, and illustrates the task of suggesting word translations in context. Evaluation Procedure. Our task is to present the system a list of possible translations and let the system decide a single most likely translation given the word and its sentential context. Ground truth thus contains one word, that is, one correct translation for each sentence from the evaluation dataset. We have manually annotated the correct translation for the ground truth 1 by inspecting the discourse in Wikipedia articles and the interlingual Wikipedia links. We measure the performance of all models as Top 1 accuracy (Acc 1 ) (Gaussier et al., 2004; Tamura et al., 2012) . 
It denotes the number of word instances from the evaluation dataset whose top proposed candidate in the ranked list of translation candidates from T is exactly the correct translation for that word instance as given by ground truth over the total number of test word instances (360 in each test dataset). Parameters. We have tuned \u03bb 1 and \u03bb 2 on the development sets. We set \u03bb 1 = \u03bb 2 = 0.9 for all language pairs. We use sorted context sets (see sect. 2) and perform a cut-off at M = 3 most descriptive context words in the sorted context sets for all models. In the following section we discuss the utility of this context sorting and pruning, as well as its influence on the overall results. Inducing Latent Cross-Lingual Concepts. Our context-aware models are generic and allow experimentations with different models that induce latent cross-lingual semantic concepts. However, in this particular work we present results obtained by a multilingual probabilistic topic model called bilingual LDA (Mimno et al., 2009; Ni et al., 2009; De Smet and Moens, 2009) . The BiLDA model is a straightforward multilingual extension of the standard LDA model (Blei et al., 2003) . For the details regarding the modeling, generative story and training of the bilingual LDA model, we refer the interested reader to the aforementioned relevant literature.", |
| "cite_spans": [ |
| { |
| "start": 1691, |
| "end": 1714, |
| "text": "(Gaussier et al., 2004;", |
| "ref_id": null |
| }, |
| { |
| "start": 1715, |
| "end": 1735, |
| "text": "Tamura et al., 2012)", |
| "ref_id": "BIBREF57" |
| }, |
| { |
| "start": 2737, |
| "end": 2757, |
| "text": "(Mimno et al., 2009;", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 2758, |
| "end": 2774, |
| "text": "Ni et al., 2009;", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 2775, |
| "end": 2799, |
| "text": "De Smet and Moens, 2009)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 2888, |
| "end": 2907, |
| "text": "(Blei et al., 2003)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence in Italian", |
| "sec_num": null |
| }, |
| { |
| "text": "We have used the Gibbs sampling procedure (Geman and Geman, 1984) tailored for BiLDA in particular for training and have experimented with different number of topics K in the interval 300 \u2212 2500. Here, we present only the results obtained with K = 2000 for all language pairs which also yielded the best or near-optimal performance in (Dinu and Lapata, 2010b; Vuli\u0107 et al., 2011) .", |
| "cite_spans": [ |
| { |
| "start": 42, |
| "end": 65, |
| "text": "(Geman and Geman, 1984)", |
| "ref_id": null |
| }, |
| { |
| "start": 335, |
| "end": 359, |
| "text": "(Dinu and Lapata, 2010b;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 360, |
| "end": 379, |
| "text": "Vuli\u0107 et al., 2011)", |
| "ref_id": "BIBREF61" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence in Italian", |
| "sec_num": null |
| }, |
| { |
| "text": "Other parameters of the model are set to the typical values according to Steyvers and Griffiths (2007) : \u03b1 = 50/K and \u03b2 = 0.01. 2 Models in Comparison. We test the performance of our Direct-Fusion, Smoothed-Fusion and Late-Fusion models, and compare their results with the context-insensitive CLSS models described in sect. 2 (No-Context). We provide results with two different similarity functions:", |
| "cite_spans": [ |
| { |
| "start": 73, |
| "end": 102, |
| "text": "Steyvers and Griffiths (2007)", |
| "ref_id": "BIBREF54" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence in Italian", |
| "sec_num": null |
| }, |
| { |
| "text": "(1) We have tested different SF-s (e.g., the Kullback-Leibler and the Jensen-Shannon divergence, the cosine measure) on the K-dimensional vector representations, and have detected that in general the best scores are obtained with the Bhattacharyya coefficient (BC) (Cha, 2007; Kazama et al., 2010) ,", |
| "cite_spans": [ |
| { |
| "start": 265, |
| "end": 276, |
| "text": "(Cha, 2007;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 277, |
| "end": 297, |
| "text": "Kazama et al., 2010)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence in Italian", |
| "sec_num": null |
| }, |
| { |
| "text": "Another similarity method we use is the socalled Cue method (Griffiths et al., 2007; Vuli\u0107 et al., 2011) , which models the probability that a target word t T i will be generated as an association response given some cue source word w S", |
| "cite_spans": [ |
| { |
| "start": 60, |
| "end": 84, |
| "text": "(Griffiths et al., 2007;", |
| "ref_id": "BIBREF54" |
| }, |
| { |
| "start": 85, |
| "end": 104, |
| "text": "Vuli\u0107 et al., 2011)", |
| "ref_id": "BIBREF61" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence in Italian", |
| "sec_num": null |
| }, |
| { |
| "text": "1 . In short, the method computes the score P (t T i |w S 1 ) = P (t T i |z k )P (z k |w S 1 ). We can use the scores P (t T i |w S 1 ) obtained by inputting out-ofcontext probability scores P (z k |w S 1 ) or modulated probability scores P (z k |w S 1 ) to produce the ranking of translation candidates.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence in Italian", |
| "sec_num": null |
| }, |
| { |
| "text": "The performance of all the models in comparison is displayed in tab. 3. These results lead us to several conclusions: (i) All proposed context-sensitive CLSS models suggesting word translations in context significantly outperform context-insensitive CLSS models, which are able to produce only word translations in isolation. The improvements in results when taking context into account are ob- ii) The choice of a similarity function influences the results. On average, the Cue method as SF outperforms other standard similarity functions (e.g., Kullback-Leibler, Jensen-Shannon, cosine, BC) in this evaluation task. However, it is again important to state that regardless of the actual choice of SF, context-aware models that modulate out-ofcontext word representations using the knowledge of local context outscore context-insensitive models that utilize non-modulated out-of-context representations (with all other parameters equal). (iii) The Direct-Fusion model, conceptually similar to a model of word similarity in context in monolingual settings (Dinu and Lapata, 2010a) , is outperformed by the other two context-sensitive models. In Direct-Fusion, the observed word and its context are modeled in the same fashion, that is, the model does not distinguish between the word and its surrounding context when it computes the modulated probability scores P (z k |w S 1 ) (see eq. (7)). Unlike Direct-Fusion, the modeling assumptions of Smoothed-Fusion and Late-Fusion provide a clear distinction between the observed word w S 1 and its context Con(w S 1 ) and combine the outof-context representation of w S 1 and its contextual knowledge into a smoothed LM-inspired probabilistic model. As the results reveal, that strategy leads to better overall scores. The best scores in general are obtained by Smoothed-Fusion, but it is also outperformed by Late-Fusion in several experimental runs where BC was used as SF. 
However, the difference in results between Smoothed-Fusion and Late-Fusion in these experimental runs is not statistically significant according to a chisquared significance test (p < 0.05). (iv) The results for Dutch-English are influenced by the quality of training data. The performance of our models of similarity is higher for models that rely on latent-cross lingual topics estimated from the data of higher quality (i.e., compare the results when trained on Wiki and Wiki+EP in tab. 3). The overall quality of our models of similarity is of course dependent on the quality of the latent cross-lingual topics estimated from training data, and the quality of these latent cross-lingual concepts is further dependent on the quality of multilingual training data. This finding is in line with a similar finding reported for the task of bilingual lexicon extraction . (v) Although Dutch is regarded as more similar to English than Italian or Spanish, we do not observe any major increase in the results on both test datasets for the English-Dutch language pair compared to English-Spanish/Italian. That phenomenon may be attributed to the difference in size and quality of our training Wikipedia datasets. Moreover, while the probabilistic framework proposed in this chapter is completely language pair agnostic as it does not make any language pair dependent modeling assumptions, we acknowledge the fact that all three language pairs comprise languages coming from the same phylum, that is, the Indo-European language family. Future extensions of our probabilistic modeling framework also include porting the framework to other more distant language pairs that do not share the same roots nor the same alphabet (e.g., English-Chinese/Hindi). Analysis of Context Sorting and Pruning. We also investigate the utility of context sorting and pruning, and its influence on the overall results in our evaluation task. 
Therefore, we have conducted experiments with sorted context sets that were pruned at different positions, ranging from 1 (only the most similar word to w S 1 in a sentence is included in the context set Con(w S 1 )) to All (all words occurring in a same sentence with w S 1 are included in Con(w S 1 )). The monolingual similarity between w S 1 and each potential context word in a sentence has been computed using BC on their out-of-context representations in the latent semantic space spanned by cross-lingual topics. Fig. 2 shows how the size of the sorted context influences the overall results. The presented results have been obtained by the Cue+Smoothed-Fusion combination, but a similar behavior is observed when employing other combinations. Fig. 2 clearly indicates the importance of context sorting and pruning. The procedure ensures that only the most semantically similar words in a given scope (e.g., a sentence) influence the choice of a correct meaning. In other words, closely semantically similar words in the same sentence are more reliable indicators for the most probable word meaning. They are more informative in modulating the out-of-context word representations in context-sensitive similarity models. We observe large improvements in scores when we retain only the top M semantically similar words in the context set (e.g., when M =5, the scores are 0.694, 0.758, 0.717, and 0.767 for ES-EN, IT-EN, NL-EN (Wiki) and NL-EN (Wiki+EP), respectively; while the same scores are 0.572, 0.703, 0.639 and 0.672 when M =All).", |
| "cite_spans": [ |
| { |
| "start": 1055, |
| "end": 1079, |
| "text": "(Dinu and Lapata, 2010a)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 4357, |
| "end": 4363, |
| "text": "Fig. 2", |
| "ref_id": "FIGREF0" |
| }, |
| { |
| "start": 4588, |
| "end": 4594, |
| "text": "Fig. 2", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results and Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We have proposed a new probabilistic approach to modeling cross-lingual semantic similarity in con-text, which relies only on co-occurrence counts and latent cross-lingual concepts which can be estimated using only comparable data. The approach is purely statistical and it does not make any additional language-pair dependent assumptions; it does not rely on a bilingual lexicon, orthographic clues or predefined ontology/category knowledge, and it does not require parallel data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions and Future Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "The key idea in the approach is to represent words, regardless of their actual language, as distributions over the latent concepts, and both outof-context and contextualized word representations are then presented in the same latent space spanned by the latent semantic concepts. A change in word meaning after observing its context is reflected in a change of its distribution over the latent concepts. Results for three language pairs have clearly shown the importance of the newly developed modulated or \"contextualized\" word representations in the task of suggesting word translations in context.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions and Future Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "We believe that the proposed framework is only a start, as it ignites a series of new research questions and perspectives. One may further examine the influence of context scope (e.g., documentbased vs. sentence-based vs. window-based contexts), as well as context selection and aggregation (see sect. 2) on the contextualized models. For instance, similar to the model from\u00d3 S\u00e9aghdha and Korhonen (2011) in the monolingual setting, one may try to introduce dependency-based contexts (Pad\u00f3 and Lapata, 2007) and incorporate the syntax-based knowledge in the context-aware CLSS modeling. It is also worth studying other models that induce latent semantic concepts from multilingual data (see sect. 2) within this framework of context-sensitive CLSS modeling. One may also investigate a similar approach to contextsensitive CLSS modeling that could operate with explicitly defined concept categories (Gabrilovich and Markovitch, 2007; Cimiano et al., 2009; Hassan and Mihalcea, 2009; Hassan and Mihalcea, 2011; McCrae et al., 2013) .", |
| "cite_spans": [ |
| { |
| "start": 376, |
| "end": 404, |
| "text": "S\u00e9aghdha and Korhonen (2011)", |
| "ref_id": "BIBREF41" |
| }, |
| { |
| "start": 484, |
| "end": 507, |
| "text": "(Pad\u00f3 and Lapata, 2007)", |
| "ref_id": "BIBREF43" |
| }, |
| { |
| "start": 898, |
| "end": 932, |
| "text": "(Gabrilovich and Markovitch, 2007;", |
| "ref_id": null |
| }, |
| { |
| "start": 933, |
| "end": 954, |
| "text": "Cimiano et al., 2009;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 955, |
| "end": 981, |
| "text": "Hassan and Mihalcea, 2009;", |
| "ref_id": null |
| }, |
| { |
| "start": 982, |
| "end": 1008, |
| "text": "Hassan and Mihalcea, 2011;", |
| "ref_id": null |
| }, |
| { |
| "start": 1009, |
| "end": 1029, |
| "text": "McCrae et al., 2013)", |
| "ref_id": "BIBREF34" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions and Future Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Available at http://people.cs.kuleuven.be/ \u223civan.vulic/software/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We are well aware that different hyper-parameter settings(Asuncion et al., 2009;Lu et al., 2011), might have influence on the quality of learned latent cross-lingual concepts/topics and, consequently, the quality of latent semantic space, but that analysis is not the focus of this work. Additionally, we perform semantic space pruning(Reisinger and Mooney, 2010;. All computations are performed over the best scoring 100 cross-lingual topics according to their respective scores P (z k |w S i ) similarly to.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We would like to thank the anonymous reviewers for their comments and suggestions. This research has been carried out in the framework of the Smart Computer-Aided Translation Environment (SCATE) project (IWT-SBO 130041).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Term similarity-based query expansion for cross-language information retrieval", |
| "authors": [ |
| { |
| "first": "Mirna", |
| "middle": [], |
| "last": "Adriani", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "J" |
| ], |
| "last": "Van Rijsbergen", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "Proceedings of the 3rd European Conference on Research and Advanced Technology for Digital Libraries (ECDL)", |
| "volume": "", |
| "issue": "", |
| "pages": "311--322", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mirna Adriani and C. J. van Rijsbergen. 1999. Term similarity-based query expansion for cross-language information retrieval. In Proceedings of the 3rd Eu- ropean Conference on Research and Advanced Tech- nology for Digital Libraries (ECDL), pages 311- 322.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Unsupervised crosslingual lexical substitution", |
| "authors": [ |
| { |
| "first": "Marianna", |
| "middle": [], |
| "last": "Apidianaki", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 1st Workshop on Unsupervised Learning in NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "13--23", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marianna Apidianaki. 2011. Unsupervised cross- lingual lexical substitution. In Proceedings of the 1st Workshop on Unsupervised Learning in NLP, pages 13-23.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "On smoothing and inference for topic models", |
| "authors": [ |
| { |
| "first": "Arthur", |
| "middle": [], |
| "last": "Asuncion", |
| "suffix": "" |
| }, |
| { |
| "first": "Max", |
| "middle": [], |
| "last": "Welling", |
| "suffix": "" |
| }, |
| { |
| "first": "Padhraic", |
| "middle": [], |
| "last": "Smyth", |
| "suffix": "" |
| }, |
| { |
| "first": "Yee Whye", |
| "middle": [], |
| "last": "Teh", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the 25th Conference on Uncertainty in Artificial Intelligence (UAI)", |
| "volume": "", |
| "issue": "", |
| "pages": "27--34", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Arthur Asuncion, Max Welling, Padhraic Smyth, and Yee Whye Teh. 2009. On smoothing and inference for topic models. In Proceedings of the 25th Confer- ence on Uncertainty in Artificial Intelligence (UAI), pages 27-34.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Phrasal translation and query expansion techniques for cross-language information retrieval", |
| "authors": [ |
| { |
| "first": "Lisa", |
| "middle": [], |
| "last": "Ballesteros", |
| "suffix": "" |
| }, |
| { |
| "first": "W. Bruce", |
| "middle": [], |
| "last": "Croft", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Proceedings of the 20th Annual International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR)", |
| "volume": "", |
| "issue": "", |
| "pages": "84--91", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lisa Ballesteros and W. Bruce Croft. 1997. Phrasal translation and query expansion techniques for cross-language information retrieval. In Proceed- ings of the 20th Annual International ACM SIGIR Conference on Research and Development in Infor- mation Retrieval (SIGIR), pages 84-91.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Nouns are vectors, adjectives are matrices: Representing adjective-noun constructions in semantic space", |
| "authors": [ |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Baroni", |
| "suffix": "" |
| }, |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Zamparelli", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 2010 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1183--1193", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marco Baroni and Roberto Zamparelli. 2010. Nouns are vectors, adjectives are matrices: Represent- ing adjective-noun constructions in semantic space. In Proceedings of the 2010 Conference on Em- pirical Methods in Natural Language Processing (EMNLP), pages 1183-1193.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "A comparison of vector-based representations for semantic composition", |
| "authors": [ |
| { |
| "first": "William", |
| "middle": [], |
| "last": "Blacoe", |
| "suffix": "" |
| }, |
| { |
| "first": "Mirella", |
| "middle": [], |
| "last": "Lapata", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning (EMNLP-CoNLL)", |
| "volume": "", |
| "issue": "", |
| "pages": "546--556", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "William Blacoe and Mirella Lapata. 2012. A com- parison of vector-based representations for seman- tic composition. In Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Lan- guage Processing and Computational Natural Lan- guage Learning (EMNLP-CoNLL), pages 546-556.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Latent Dirichlet Allocation", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [ |
| "M" |
| ], |
| "last": "Blei", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [ |
| "I" |
| ], |
| "last": "Jordan", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "3", |
| "issue": "", |
| "pages": "993--1022", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David M. Blei, Andrew Y. Ng, and Michael I. Jordan. 2003. Latent Dirichlet Allocation. Journal of Ma- chine Learning Research, 3:993-1022.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Multilingual topic models for unaligned text", |
| "authors": [ |
| { |
| "first": "Jordan", |
| "middle": [], |
| "last": "Boyd", |
| "suffix": "" |
| }, |
| { |
| "first": "-", |
| "middle": [], |
| "last": "Graber", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "M" |
| ], |
| "last": "Blei", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the 25th Conference on Uncertainty in Artificial Intelligence (UAI)", |
| "volume": "", |
| "issue": "", |
| "pages": "75--82", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jordan Boyd-Graber and David M. Blei. 2009. Mul- tilingual topic models for unaligned text. In Pro- ceedings of the 25th Conference on Uncertainty in Artificial Intelligence (UAI), pages 75-82.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "The mathematics of statistical machine translation: Parameter estimation", |
| "authors": [ |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Peter", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Brown", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [ |
| "Della" |
| ], |
| "last": "Vincent", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephen", |
| "middle": [ |
| "A" |
| ], |
| "last": "Pietra", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [ |
| "L" |
| ], |
| "last": "Della Pietra", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mercer", |
| "suffix": "" |
| } |
| ], |
| "year": 1993, |
| "venue": "Computational Linguistics", |
| "volume": "19", |
| "issue": "2", |
| "pages": "263--311", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter F. Brown, Vincent J. Della Pietra, Stephen A. Della Pietra, and Robert L. Mercer. 1993. The mathematics of statistical machine translation: Parameter estimation. Computational Linguistics, 19(2):263-311.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Comprehensive survey on distance/similarity measures between probability density functions", |
| "authors": [ |
| { |
| "first": "Sung-Hyuk", |
| "middle": [], |
| "last": "Cha", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "International Journal of Mathematical Models and Methods in Applied Sciences", |
| "volume": "1", |
| "issue": "4", |
| "pages": "300--307", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sung-Hyuk Cha. 2007. Comprehensive survey on dis- tance/similarity measures between probability den- sity functions. International Journal of Mathe- matical Models and Methods in Applied Sciences, 1(4):300-307.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Explicit versus latent concept models for cross-language information retrieval", |
| "authors": [ |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Cimiano", |
| "suffix": "" |
| }, |
| { |
| "first": "Antje", |
| "middle": [], |
| "last": "Schultz", |
| "suffix": "" |
| }, |
| { |
| "first": "Sergej", |
| "middle": [], |
| "last": "Sizov", |
| "suffix": "" |
| }, |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Sorg", |
| "suffix": "" |
| }, |
| { |
| "first": "Steffen", |
| "middle": [], |
| "last": "Staab", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the 21st International Joint Conference on Artifical Intelligence (IJCAI)", |
| "volume": "", |
| "issue": "", |
| "pages": "1513--1518", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philipp Cimiano, Antje Schultz, Sergej Sizov, Philipp Sorg, and Steffen Staab. 2009. Explicit versus la- tent concept models for cross-language information retrieval. In Proceedings of the 21st International Joint Conference on Artifical Intelligence (IJCAI), pages 1513-1518.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "A context-theoretic framework for compositionality in distributional semantics", |
| "authors": [ |
| { |
| "first": "Daoud", |
| "middle": [], |
| "last": "Clarke", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Computational Linguistics", |
| "volume": "38", |
| "issue": "1", |
| "pages": "41--71", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Daoud Clarke. 2012. A context-theoretic frame- work for compositionality in distributional seman- tics. Computational Linguistics, 38(1):41-71.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Unsupervised part-of-speech tagging with bilingual graphbased projections", |
| "authors": [ |
| { |
| "first": "Dipanjan", |
| "middle": [], |
| "last": "Das", |
| "suffix": "" |
| }, |
| { |
| "first": "Slav", |
| "middle": [], |
| "last": "Petrov", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies (ACL-HLT)", |
| "volume": "", |
| "issue": "", |
| "pages": "600--609", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dipanjan Das and Slav Petrov. 2011. Unsuper- vised part-of-speech tagging with bilingual graph- based projections. In Proceedings of the 49th An- nual Meeting of the Association for Computational Linguistics: Human Language Technologies (ACL- HLT), pages 600-609.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Domain adaptation for machine translation by mining unseen words", |
| "authors": [ |
| { |
| "first": "Hal", |
| "middle": [], |
| "last": "Daum\u00e9", |
| "suffix": "" |
| }, |
| { |
| "first": "Iii", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "Jagadeesh", |
| "middle": [], |
| "last": "Jagarlamudi", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies (ACL-HLT)", |
| "volume": "", |
| "issue": "", |
| "pages": "407--412", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hal Daum\u00e9 III and Jagadeesh Jagarlamudi. 2011. Do- main adaptation for machine translation by min- ing unseen words. In Proceedings of the 49th An- nual Meeting of the Association for Computational Linguistics: Human Language Technologies (ACL- HLT), pages 407-412.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Cross-language linking of news stories on the Web using interlingual topic modeling", |
| "authors": [ |
| { |
| "first": "De", |
| "middle": [], |
| "last": "Wim", |
| "suffix": "" |
| }, |
| { |
| "first": "Marie-Francine", |
| "middle": [], |
| "last": "Smet", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Moens", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the CIKM 2009 Workshop on Social Web Search and Mining (SWSM@CIKM)", |
| "volume": "", |
| "issue": "", |
| "pages": "57--64", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wim De Smet and Marie-Francine Moens. 2009. Cross-language linking of news stories on the Web using interlingual topic modeling. In Proceedings of the CIKM 2009 Workshop on Social Web Search and Mining (SWSM@CIKM), pages 57-64.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "On the equivalence between non-negative matrix factorization and probabilistic latent semantic indexing", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [ |
| "Q" |
| ], |
| "last": "Chris", |
| "suffix": "" |
| }, |
| { |
| "first": "Tao", |
| "middle": [], |
| "last": "Ding", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Peng", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Computational Statistics & Data Analysis", |
| "volume": "52", |
| "issue": "8", |
| "pages": "3913--3927", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chris H. Q. Ding, Tao Li, and Wei Peng. 2008. On the equivalence between non-negative matrix fac- torization and probabilistic latent semantic index- ing. Computational Statistics & Data Analysis, 52(8):3913-3927.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Measuring distributional similarity in context", |
| "authors": [ |
| { |
| "first": "Georgiana", |
| "middle": [], |
| "last": "Dinu", |
| "suffix": "" |
| }, |
| { |
| "first": "Mirella", |
| "middle": [], |
| "last": "Lapata", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 2010 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1162--1172", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Georgiana Dinu and Mirella Lapata. 2010a. Measur- ing distributional similarity in context. In Proceed- ings of the 2010 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1162-1172.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Topic models for meaning similarity in context", |
| "authors": [ |
| { |
| "first": "Georgiana", |
| "middle": [], |
| "last": "Dinu", |
| "suffix": "" |
| }, |
| { |
| "first": "Mirella", |
| "middle": [], |
| "last": "Lapata", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 23rd International Conference on Computational Linguistics (COLING)", |
| "volume": "", |
| "issue": "", |
| "pages": "250--258", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Georgiana Dinu and Mirella Lapata. 2010b. Topic models for meaning similarity in context. In Pro- ceedings of the 23rd International Conference on Computational Linguistics (COLING), pages 250- 258.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Automatic cross-linguistic information retrieval using Latent Semantic Indexing", |
| "authors": [ |
| { |
| "first": "Susan", |
| "middle": [ |
| "T" |
| ], |
| "last": "Dumais", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [ |
| "K" |
| ], |
| "last": "Landauer", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Littman", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "Proceedings of the SIGIR Workshop on Cross-Linguistic Information Retrieval", |
| "volume": "", |
| "issue": "", |
| "pages": "16--23", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Susan T. Dumais, Thomas K. Landauer, and Michael Littman. 1996. Automatic cross-linguistic infor- mation retrieval using Latent Semantic Indexing. In Proceedings of the SIGIR Workshop on Cross- Linguistic Information Retrieval, pages 16-23.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Syntactic transfer using a bilingual lexicon", |
| "authors": [ |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Durrett", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Pauls", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning (EMNLP-CoNLL)", |
| "volume": "", |
| "issue": "", |
| "pages": "1--11", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Greg Durrett, Adam Pauls, and Dan Klein. 2012. Syn- tactic transfer using a bilingual lexicon. In Pro- ceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Com- putational Natural Language Learning (EMNLP- CoNLL), pages 1-11.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Symmetric correspondence topic models for multilingual text analysis", |
| "authors": [ |
| { |
| "first": "Kosuke", |
| "middle": [], |
| "last": "Fukumasu", |
| "suffix": "" |
| }, |
| { |
| "first": "Koji", |
| "middle": [], |
| "last": "Eguchi", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [ |
| "P" |
| ], |
| "last": "Xing", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Procedings of the 25th Annual Conference on Advances in Neural Information Processing Systems (NIPS)", |
| "volume": "", |
| "issue": "", |
| "pages": "1295--1303", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kosuke Fukumasu, Koji Eguchi, and Eric P. Xing. 2012. Symmetric correspondence topic models for multilingual text analysis. In Procedings of the 25th Annual Conference on Advances in Neural Informa- tion Processing Systems (NIPS), pages 1295-1303.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Speech and Language Processing: An Introduction to Natural Language Processing, Computational Linguistics, and Speech Recognition", |
| "authors": [ |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "James", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Martin", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Daniel Jurafsky and James H. Martin. 2000. Speech and Language Processing: An Introduction to Nat- ural Language Processing, Computational Linguis- tics, and Speech Recognition. Prentice Hall PTR.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "A Bayesian method for robust estimation of distributional similarities", |
| "authors": [ |
| { |
| "first": "Stijn", |
| "middle": [], |
| "last": "Jun'ichi Kazama", |
| "suffix": "" |
| }, |
| { |
| "first": "Kow", |
| "middle": [], |
| "last": "De Saeger", |
| "suffix": "" |
| }, |
| { |
| "first": "Masaki", |
| "middle": [], |
| "last": "Kuroda", |
| "suffix": "" |
| }, |
| { |
| "first": "Kentaro", |
| "middle": [], |
| "last": "Murata", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Torisawa", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 48th Annual Meeting of the Association for Computational Linguistics (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "247--256", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jun'ichi Kazama, Stijn De Saeger, Kow Kuroda, Masaki Murata, and Kentaro Torisawa. 2010. A Bayesian method for robust estimation of distribu- tional similarities. In Proceedings of the 48th An- nual Meeting of the Association for Computational Linguistics (ACL), pages 247-256.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Multilingual named entity recognition using parallel data and metadata from Wikipedia", |
| "authors": [ |
| { |
| "first": "Sungchul", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| }, |
| { |
| "first": "Hwanjo", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "694--702", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sungchul Kim, Kristina Toutanova, and Hwanjo Yu. 2012. Multilingual named entity recognition using parallel data and metadata from Wikipedia. In Pro- ceedings of the 50th Annual Meeting of the Asso- ciation for Computational Linguistics (ACL), pages 694-702.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Inducing crosslingual distributed representations of words", |
| "authors": [ |
| { |
| "first": "Alexandre", |
| "middle": [], |
| "last": "Klementiev", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Titov", |
| "suffix": "" |
| }, |
| { |
| "first": "Binod", |
| "middle": [], |
| "last": "Bhattarai", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 24th International Conference on Computational Linguistics (COLING)", |
| "volume": "", |
| "issue": "", |
| "pages": "1459--1474", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexandre Klementiev, Ivan Titov, and Binod Bhat- tarai. 2012. Inducing crosslingual distributed repre- sentations of words. In Proceedings of the 24th In- ternational Conference on Computational Linguis- tics (COLING), pages 1459-1474.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Learning a translation lexicon from monolingual corpora", |
| "authors": [ |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Knight", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the ACL Workshop on Unsupervised Lexical Acquisition (ULA)", |
| "volume": "", |
| "issue": "", |
| "pages": "9--16", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philipp Koehn and Kevin Knight. 2002. Learning a translation lexicon from monolingual corpora. In Proceedings of the ACL Workshop on Unsupervised Lexical Acquisition (ULA), pages 9-16.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Europarl: A parallel corpus for statistical machine translation", |
| "authors": [ |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the 10th Machine Translation Summit (MT SUMMIT)", |
| "volume": "", |
| "issue": "", |
| "pages": "79--86", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philipp Koehn. 2005. Europarl: A parallel corpus for statistical machine translation. In Proceedings of the 10th Machine Translation Summit (MT SUMMIT), pages 79-86.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Learning bilingual word representations by marginalizing alignments", |
| "authors": [ |
| { |
| "first": "Tom\u00e1\u0161", |
| "middle": [], |
| "last": "Ko\u010disk\u00fd", |
| "suffix": "" |
| }, |
| { |
| "first": "Karl", |
| "middle": [ |
| "Moritz" |
| ], |
| "last": "Hermann", |
| "suffix": "" |
| }, |
| { |
| "first": "Phil", |
| "middle": [], |
| "last": "Blunsom", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "224--229", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tom\u00e1\u0161 Ko\u010disk\u00fd, Karl Moritz Hermann, and Phil Blun- som. 2014. Learning bilingual word representations by marginalizing alignments. In Proceedings of the 52nd Annual Meeting of the Association for Compu- tational Linguistics (ACL), pages 224-229.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Relevance-based language models", |
| "authors": [ |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Lavrenko", |
| "suffix": "" |
| }, |
| { |
| "first": "W. Bruce", |
| "middle": [], |
| "last": "Croft", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Proceedings of the 24th Annual International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR)", |
| "volume": "", |
| "issue": "", |
| "pages": "120--127", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Victor Lavrenko and W. Bruce Croft. 2001. Relevance-based language models. In Proceedings of the 24th Annual International ACM SIGIR Con- ference on Research and Development in Informa- tion Retrieval (SIGIR), pages 120-127.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Cross-lingual relevance models", |
| "authors": [ |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Lavrenko", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Choquette", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [ |
| "Bruce" |
| ], |
| "last": "Croft", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the 25th Annual International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR)", |
| "volume": "", |
| "issue": "", |
| "pages": "175--182", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Victor Lavrenko, Martin Choquette, and W. Bruce Croft. 2002. Cross-lingual relevance models. In Proceedings of the 25th Annual International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR), pages 175-182.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Algorithms for non-negative matrix factorization", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Daniel", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [ |
| "Sebastian" |
| ], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Seung", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "Proceedings of the 12th Conference on Advances in Neural Information Processing Systems (NIPS)", |
| "volume": "", |
| "issue": "", |
| "pages": "556--562", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Daniel D. Lee and H. Sebastian Seung. 1999. Al- gorithms for non-negative matrix factorization. In Proceedings of the 12th Conference on Advances in Neural Information Processing Systems (NIPS), pages 556-562.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Measures of distributional similarity", |
| "authors": [ |
| { |
| "first": "Lillian", |
| "middle": [ |
| "Lee" |
| ], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "Proceedings of the 37th Annual Meeting of the Association for Computational Linguistics (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "25--32", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lillian Lee. 1999. Measures of distributional sim- ilarity. In Proceedings of the 37th Annual Meet- ing of the Association for Computational Linguistics (ACL), pages 25-32.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Dictionary-based techniques for cross-language information retrieval", |
| "authors": [ |
| { |
| "first": "Gina-Anne", |
| "middle": [], |
| "last": "Levow", |
| "suffix": "" |
| }, |
| { |
| "first": "Douglas", |
| "middle": [ |
| "W" |
| ], |
| "last": "Oard", |
| "suffix": "" |
| }, |
| { |
| "first": "Philip", |
| "middle": [], |
| "last": "Resnik", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Information Processing and Management", |
| "volume": "41", |
| "issue": "3", |
| "pages": "523--547", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gina-Anne Levow, Douglas W. Oard, and Philip Resnik. 2005. Dictionary-based techniques for cross-language information retrieval. Information Processing and Management, 41(3):523-547.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Investigating task performance of probabilistic topic models: An empirical study of PLSA and LDA", |
| "authors": [ |
| { |
| "first": "Yue", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "Qiaozhu", |
| "middle": [], |
| "last": "Mei", |
| "suffix": "" |
| }, |
| { |
| "first": "Chengxiang", |
| "middle": [], |
| "last": "Zhai", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "formation Retrieval", |
| "volume": "14", |
| "issue": "", |
| "pages": "178--203", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yue Lu, Qiaozhu Mei, and Chengxiang Zhai. 2011. Investigating task performance of probabilistic topic models: An empirical study of PLSA and LDA. In- formation Retrieval, 14(2):178-203.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Orthonormal explicit topic analysis for cross-lingual document matching", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Philip Mccrae", |
| "suffix": "" |
| }, |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Cimiano", |
| "suffix": "" |
| }, |
| { |
| "first": "Roman", |
| "middle": [], |
| "last": "Klinger", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1732--1740", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John Philip McCrae, Philipp Cimiano, and Roman Klinger. 2013. Orthonormal explicit topic analysis for cross-lingual document matching. In Proceed- ings of the 2013 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1732-1740.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "A multilingual paradigm for automatic verb classification", |
| "authors": [ |
| { |
| "first": "Paola", |
| "middle": [], |
| "last": "Merlo", |
| "suffix": "" |
| }, |
| { |
| "first": "Suzanne", |
| "middle": [], |
| "last": "Stevenson", |
| "suffix": "" |
| }, |
| { |
| "first": "Vivian", |
| "middle": [], |
| "last": "Tsang", |
| "suffix": "" |
| }, |
| { |
| "first": "Gianluca", |
| "middle": [], |
| "last": "Allaria", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "207--214", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Paola Merlo, Suzanne Stevenson, Vivian Tsang, and Gianluca Allaria. 2002. A multilingual paradigm for automatic verb classification. In Proceedings of the 40th Annual Meeting of the Association for Com- putational Linguistics (ACL), pages 207-214.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Polylingual topic models", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Mimno", |
| "suffix": "" |
| }, |
| { |
| "first": "Hanna", |
| "middle": [], |
| "last": "Wallach", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Naradowsky", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "McCallum", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the 2009 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "880--889", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Mimno, Hanna Wallach, Jason Naradowsky, David A. Smith, and Andrew McCallum. 2009. Polylingual topic models. In Proceedings of the 2009 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 880-889.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Vector-based models of semantic composition", |
| "authors": [ |
| { |
| "first": "Jeff", |
| "middle": [], |
| "last": "Mitchell", |
| "suffix": "" |
| }, |
| { |
| "first": "Mirella", |
| "middle": [], |
| "last": "Lapata", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of the 46th Annual Meeting of the Association for Computational Linguistics (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "236--244", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeff Mitchell and Mirella Lapata. 2008. Vector-based models of semantic composition. In Proceedings of the 46th Annual Meeting of the Association for Com- putational Linguistics (ACL), pages 236-244.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Composition in distributional models of semantics", |
| "authors": [ |
| { |
| "first": "Jeff", |
| "middle": [], |
| "last": "Mitchell", |
| "suffix": "" |
| }, |
| { |
| "first": "Mirella", |
| "middle": [], |
| "last": "Lapata", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Cognitive Science", |
| "volume": "34", |
| "issue": "8", |
| "pages": "1388--1429", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeff Mitchell and Mirella Lapata. 2010. Composition in distributional models of semantics. Cognitive Sci- ence, 34(8):1388-1429.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Exploiting parallel texts for word sense disambiguation: An empirical study", |
| "authors": [ |
| { |
| "first": "Hwee Tou", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "Bin", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yee Seng", |
| "middle": [], |
| "last": "Chan", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of the 41st Annual Meeting of the Association for Computational Linguistics (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "455--462", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hwee Tou Ng, Bin Wang, and Yee Seng Chan. 2003. Exploiting parallel texts for word sense disambigua- tion: An empirical study. In Proceedings of the 41st Annual Meeting of the Association for Compu- tational Linguistics (ACL), pages 455-462.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Mining multilingual topics from Wikipedia", |
| "authors": [ |
| { |
| "first": "Xiaochuan", |
| "middle": [], |
| "last": "Ni", |
| "suffix": "" |
| }, |
| { |
| "first": "Jian-Tao", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Jian", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zheng", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the 18th International World Wide Web Conference (WWW)", |
| "volume": "", |
| "issue": "", |
| "pages": "1155--1156", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiaochuan Ni, Jian-Tao Sun, Jian Hu, and Zheng Chen. 2009. Mining multilingual topics from Wikipedia. In Proceedings of the 18th International World Wide Web Conference (WWW), pages 1155-1156.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "Probabilistic models of similarity in syntactic context", |
| "authors": [ |
| { |
| "first": "Diarmuid", |
| "middle": [], |
| "last": "\u00d3 S\u00e9aghdha", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1047--1057", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diarmuid\u00d3 S\u00e9aghdha and Anna Korhonen. 2011. Probabilistic models of similarity in syntactic con- text. In Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1047-1057.", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "A systematic comparison of various statistical alignment models", |
| "authors": [ |
| { |
| "first": "Franz Josef", |
| "middle": [], |
| "last": "Och", |
| "suffix": "" |
| }, |
| { |
| "first": "Hermann", |
| "middle": [], |
| "last": "Ney", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Computational Linguistics", |
| "volume": "29", |
| "issue": "1", |
| "pages": "19--51", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Franz Josef Och and Hermann Ney. 2003. A sys- tematic comparison of various statistical alignment models. Computational Linguistics, 29(1):19-51.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "Dependency-based construction of semantic space models", |
| "authors": [ |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Pad\u00f3", |
| "suffix": "" |
| }, |
| { |
| "first": "Mirella", |
| "middle": [], |
| "last": "Lapata", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Computational Linguistics", |
| "volume": "33", |
| "issue": "2", |
| "pages": "161--199", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sebastian Pad\u00f3 and Mirella Lapata. 2007. Dependency-based construction of semantic space models. Computational Linguistics, 33(2):161-199.", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "Crosslingual annotation projection for semantic roles", |
| "authors": [ |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Pad\u00f3", |
| "suffix": "" |
| }, |
| { |
| "first": "Mirella", |
| "middle": [], |
| "last": "Lapata", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Journal of Artificial Intelligence Research", |
| "volume": "36", |
| "issue": "", |
| "pages": "307--340", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sebastian Pad\u00f3 and Mirella Lapata. 2009. Cross- lingual annotation projection for semantic roles. Journal of Artificial Intelligence Research, 36:307- 340.", |
| "links": null |
| }, |
| "BIBREF45": { |
| "ref_id": "b45", |
| "title": "Crosslingual induction of selectional preferences with bilingual vector spaces", |
| "authors": [ |
| { |
| "first": "Yves", |
| "middle": [], |
| "last": "Peirsman", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Pad\u00f3", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 11th Meeting of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (NAACL-HLT)", |
| "volume": "", |
| "issue": "", |
| "pages": "921--929", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yves Peirsman and Sebastian Pad\u00f3. 2010. Cross- lingual induction of selectional preferences with bilingual vector spaces. In Proceedings of the 11th Meeting of the North American Chapter of the Asso- ciation for Computational Linguistics: Human Lan- guage Technologies (NAACL-HLT), pages 921-929.", |
| "links": null |
| }, |
| "BIBREF46": { |
| "ref_id": "b46", |
| "title": "A language modeling approach to information retrieval", |
| "authors": [ |
| { |
| "first": "Jay", |
| "middle": [ |
| "M" |
| ], |
| "last": "Ponte", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [ |
| "Bruce" |
| ], |
| "last": "Croft", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "Proceedings of the 21st Annual International ACM SI-GIR Conference on Research and Development in Information Retrieval (SIGIR)", |
| "volume": "", |
| "issue": "", |
| "pages": "275--281", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jay M. Ponte and W. Bruce Croft. 1998. A language modeling approach to information retrieval. In Pro- ceedings of the 21st Annual International ACM SI- GIR Conference on Research and Development in Information Retrieval (SIGIR), pages 275-281.", |
| "links": null |
| }, |
| "BIBREF47": { |
| "ref_id": "b47", |
| "title": "Translation ambiguity in and out of context", |
| "authors": [ |
| { |
| "first": "Anat", |
| "middle": [], |
| "last": "Prior", |
| "suffix": "" |
| }, |
| { |
| "first": "Shuly", |
| "middle": [], |
| "last": "Wintner", |
| "suffix": "" |
| }, |
| { |
| "first": "Brian", |
| "middle": [], |
| "last": "MacWhinney", |
| "suffix": "" |
| }, |
| { |
| "first": "Alon", |
| "middle": [], |
| "last": "Lavie", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Applied Psycholinguistics", |
| "volume": "32", |
| "issue": "1", |
| "pages": "93--111", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anat Prior, Shuly Wintner, Brian MacWhinney, and Alon Lavie. 2011. Translation ambiguity in and out of context. Applied Psycholinguistics, 32(1):93- 111.", |
| "links": null |
| }, |
| "BIBREF48": { |
| "ref_id": "b48", |
| "title": "Rare word translation extraction from aligned comparable documents", |
| "authors": [ |
| { |
| "first": "Emmanuel", |
| "middle": [], |
| "last": "Prochasson", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascale", |
| "middle": [], |
| "last": "Fung", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies (ACL-HLT)", |
| "volume": "", |
| "issue": "", |
| "pages": "1327--1335", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Emmanuel Prochasson and Pascale Fung. 2011. Rare word translation extraction from aligned compara- ble documents. In Proceedings of the 49th An- nual Meeting of the Association for Computational Linguistics: Human Language Technologies (ACL- HLT), pages 1327-1335.", |
| "links": null |
| }, |
| "BIBREF49": { |
| "ref_id": "b49", |
| "title": "A mixture model with sharing for lexical semantics", |
| "authors": [ |
| { |
| "first": "Joseph", |
| "middle": [], |
| "last": "Reisinger", |
| "suffix": "" |
| }, |
| { |
| "first": "Raymond", |
| "middle": [ |
| "J" |
| ], |
| "last": "Mooney", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 2010 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1173--1182", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joseph Reisinger and Raymond J. Mooney. 2010. A mixture model with sharing for lexical seman- tics. In Proceedings of the 2010 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1173-1182.", |
| "links": null |
| }, |
| "BIBREF50": { |
| "ref_id": "b50", |
| "title": "Compositional matrix-space models of language", |
| "authors": [ |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Rudolph", |
| "suffix": "" |
| }, |
| { |
| "first": "Eugenie", |
| "middle": [], |
| "last": "Giesbrecht", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 48th Annual Meeting of the Association for Computational Linguistics (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "907--916", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sebastian Rudolph and Eugenie Giesbrecht. 2010. Compositional matrix-space models of language. In Proceedings of the 48th Annual Meeting of the Asso- ciation for Computational Linguistics (ACL), pages 907-916.", |
| "links": null |
| }, |
| "BIBREF51": { |
| "ref_id": "b51", |
| "title": "Probabilistic part-of-speech tagging using decision trees", |
| "authors": [ |
| { |
| "first": "Helmut", |
| "middle": [], |
| "last": "Schmid", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "Proceedings of the International Conference on New Methods in Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Helmut Schmid. 1994. Probabilistic part-of-speech tagging using decision trees. In Proceedings of the International Conference on New Methods in Lan- guage Processing.", |
| "links": null |
| }, |
| "BIBREF52": { |
| "ref_id": "b52", |
| "title": "Dynamic pooling and unfolding recursive autoencoders for paraphrase detection", |
| "authors": [ |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [ |
| "H" |
| ], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Pennington", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 24th Annual Conference on Advances in Neural Information Processing Systems (NIPS)", |
| "volume": "", |
| "issue": "", |
| "pages": "801--809", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Richard Socher, Eric H. Huang, Jeffrey Pennington, Andrew Y. Ng, and Christopher D. Manning. 2011. Dynamic pooling and unfolding recursive autoen- coders for paraphrase detection. In Proceedings of the 24th Annual Conference on Advances in Neural Information Processing Systems (NIPS), pages 801- 809.", |
| "links": null |
| }, |
| "BIBREF53": { |
| "ref_id": "b53", |
| "title": "Semantic compositionality through recursive matrix-vector spaces", |
| "authors": [ |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Brody", |
| "middle": [], |
| "last": "Huval", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning (EMNLP-CoNLL)", |
| "volume": "", |
| "issue": "", |
| "pages": "1201--1211", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Richard Socher, Brody Huval, Christopher D. Man- ning, and Andrew Y. Ng. 2012. Semantic com- positionality through recursive matrix-vector spaces. In Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Process- ing and Computational Natural Language Learning (EMNLP-CoNLL), pages 1201-1211.", |
| "links": null |
| }, |
| "BIBREF54": { |
| "ref_id": "b54", |
| "title": "Probabilistic topic models. Handbook of Latent Semantic Analysis", |
| "authors": [ |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Steyvers", |
| "suffix": "" |
| }, |
| { |
| "first": "Tom", |
| "middle": [], |
| "last": "Griffiths", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "", |
| "volume": "427", |
| "issue": "", |
| "pages": "424--440", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mark Steyvers and Tom Griffiths. 2007. Probabilistic topic models. Handbook of Latent Semantic Analy- sis, 427(7):424-440.", |
| "links": null |
| }, |
| "BIBREF55": { |
| "ref_id": "b55", |
| "title": "Token and type constraints for cross-lingual part-of-speech tagging", |
| "authors": [ |
| { |
| "first": "Oscar", |
| "middle": [], |
| "last": "T\u00e4ckstr\u00f6m", |
| "suffix": "" |
| }, |
| { |
| "first": "Dipanjan", |
| "middle": [], |
| "last": "Das", |
| "suffix": "" |
| }, |
| { |
| "first": "Slav", |
| "middle": [], |
| "last": "Petrov", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "McDonald", |
| "suffix": "" |
| }, |
| { |
| "first": "Joakim", |
| "middle": [], |
| "last": "Nivre", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Transactions of ACL", |
| "volume": "1", |
| "issue": "", |
| "pages": "1--12", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oscar T\u00e4ckstr\u00f6m, Dipanjan Das, Slav Petrov, Ryan McDonald, and Joakim Nivre. 2013a. Token and type constraints for cross-lingual part-of-speech tag- ging. Transactions of ACL, 1:1-12.", |
| "links": null |
| }, |
| "BIBREF56": { |
| "ref_id": "b56", |
| "title": "Target language adaptation of discriminative transfer parsers", |
| "authors": [ |
| { |
| "first": "Oscar", |
| "middle": [], |
| "last": "T\u00e4ckstr\u00f6m", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "McDonald", |
| "suffix": "" |
| }, |
| { |
| "first": "Joakim", |
| "middle": [], |
| "last": "Nivre", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 14th Meeting of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (NAACL-HLT)", |
| "volume": "", |
| "issue": "", |
| "pages": "1061--1071", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oscar T\u00e4ckstr\u00f6m, Ryan McDonald, and Joakim Nivre. 2013b. Target language adaptation of discriminative transfer parsers. In Proceedings of the 14th Meeting of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (NAACL-HLT), pages 1061-1071.", |
| "links": null |
| }, |
| "BIBREF57": { |
| "ref_id": "b57", |
| "title": "Bilingual lexicon extraction from comparable corpora using label propagation", |
| "authors": [ |
| { |
| "first": "Akihiro", |
| "middle": [], |
| "last": "Tamura", |
| "suffix": "" |
| }, |
| { |
| "first": "Taro", |
| "middle": [], |
| "last": "Watanabe", |
| "suffix": "" |
| }, |
| { |
| "first": "Eiichiro", |
| "middle": [], |
| "last": "Sumita", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning (EMNLP-CoNLL)", |
| "volume": "", |
| "issue": "", |
| "pages": "24--36", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Akihiro Tamura, Taro Watanabe, and Eiichiro Sumita. 2012. Bilingual lexicon extraction from compara- ble corpora using label propagation. In Proceed- ings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Com- putational Natural Language Learning (EMNLP- CoNLL), pages 24-36.", |
| "links": null |
| }, |
| "BIBREF58": { |
| "ref_id": "b58", |
| "title": "Mixtures of probabilistic principal component analysers", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [ |
| "E" |
| ], |
| "last": "Tipping", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "M" |
| ], |
| "last": "Bishop", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "Neural Computation", |
| "volume": "11", |
| "issue": "2", |
| "pages": "443--482", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael E. Tipping and Christopher M. Bishop. 1999. Mixtures of probabilistic principal component anal- ysers. Neural Computation, 11(2):443-482.", |
| "links": null |
| }, |
| "BIBREF59": { |
| "ref_id": "b59", |
| "title": "Scaling up automatic cross-lingual semantic role annotation", |
| "authors": [ |
| { |
| "first": "Lonneke", |
| "middle": [], |
| "last": "van der Plas", |
| "suffix": "" |
| }, |
| { |
| "first": "Paola", |
| "middle": [], |
| "last": "Merlo", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Henderson", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies (ACL-HLT)", |
| "volume": "", |
| "issue": "", |
| "pages": "299--304", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lonneke van der Plas, Paola Merlo, and James Hen- derson. 2011. Scaling up automatic cross-lingual semantic role annotation. In Proceedings of the 49th Annual Meeting of the Association for Computa- tional Linguistics: Human Language Technologies (ACL-HLT), pages 299-304.", |
| "links": null |
| }, |
| "BIBREF60": { |
| "ref_id": "b60", |
| "title": "Crosslingual semantic similarity of words as the similarity of their semantic word responses", |
| "authors": [ |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Marie-Francine", |
| "middle": [], |
| "last": "Moens", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 14th Meeting of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (NAACL-HLT)", |
| "volume": "", |
| "issue": "", |
| "pages": "106--116", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ivan Vuli\u0107 and Marie-Francine Moens. 2013. Cross- lingual semantic similarity of words as the similarity of their semantic word responses. In Proceedings of the 14th Meeting of the North American Chapter of the Association for Computational Linguistics: Hu- man Language Technologies (NAACL-HLT), pages 106-116.", |
| "links": null |
| }, |
| "BIBREF61": { |
| "ref_id": "b61", |
| "title": "Identifying word translations from comparable corpora using latent topic models", |
| "authors": [ |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Wim", |
| "middle": [ |
| "De" |
| ], |
| "last": "Smet", |
| "suffix": "" |
| }, |
| { |
| "first": "Marie-Francine", |
| "middle": [], |
| "last": "Moens", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies (ACL-HLT)", |
| "volume": "", |
| "issue": "", |
| "pages": "479--484", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ivan Vuli\u0107, Wim De Smet, and Marie-Francine Moens. 2011. Identifying word translations from compara- ble corpora using latent topic models. In Proceed- ings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies (ACL-HLT), pages 479-484.", |
| "links": null |
| }, |
| "BIBREF62": { |
| "ref_id": "b62", |
| "title": "Cross-language information retrieval models based on latent topic models trained with documentaligned comparable corpora", |
| "authors": [ |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Wim", |
| "middle": [ |
| "De" |
| ], |
| "last": "Smet", |
| "suffix": "" |
| }, |
| { |
| "first": "Marie-Francine", |
| "middle": [], |
| "last": "Moens", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Information Retrieval", |
| "volume": "16", |
| "issue": "3", |
| "pages": "331--368", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ivan Vuli\u0107, Wim De Smet, and Marie-Francine Moens. 2013. Cross-language information retrieval models based on latent topic models trained with document- aligned comparable corpora. Information Retrieval, 16(3):331-368.", |
| "links": null |
| }, |
| "BIBREF63": { |
| "ref_id": "b63", |
| "title": "Combining bidirectional translation and synonymy for cross-language information retrieval", |
| "authors": [ |
| { |
| "first": "Jianqiang", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Douglas", |
| "middle": [ |
| "W" |
| ], |
| "last": "Oard", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the 29th Annual International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR)", |
| "volume": "", |
| "issue": "", |
| "pages": "202--209", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jianqiang Wang and Douglas W. Oard. 2006. Com- bining bidirectional translation and synonymy for cross-language information retrieval. In Proceed- ings of the 29th Annual International ACM SIGIR Conference on Research and Development in Infor- mation Retrieval (SIGIR), pages 202-209.", |
| "links": null |
| }, |
| "BIBREF64": { |
| "ref_id": "b64", |
| "title": "Domain adaptation for statistical machine translation with domain dictionary and monolingual corpora", |
| "authors": [ |
| { |
| "first": "Hua", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Haifeng", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Chengqing", |
| "middle": [], |
| "last": "Zong", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of the 22nd International Conference on Computational Linguistics (COLING)", |
| "volume": "", |
| "issue": "", |
| "pages": "993--1000", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hua Wu, Haifeng Wang, and Chengqing Zong. 2008. Domain adaptation for statistical machine transla- tion with domain dictionary and monolingual cor- pora. In Proceedings of the 22nd International Con- ference on Computational Linguistics (COLING), pages 993-1000.", |
| "links": null |
| }, |
| "BIBREF65": { |
| "ref_id": "b65", |
| "title": "Inducing multilingual POS taggers and NP bracketers via robust projection across aligned corpora", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Yarowsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Grace", |
| "middle": [], |
| "last": "Ngai", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Proceedings of the 2nd Meeting of the North American Chapter of the Association for Computational Linguistics (NAACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "200--207", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Yarowsky and Grace Ngai. 2001. Inducing mul- tilingual POS taggers and NP bracketers via robust projection across aligned corpora. In Proceedings of the 2nd Meeting of the North American Chap- ter of the Association for Computational Linguistics (NAACL), pages 200-207.", |
| "links": null |
| }, |
| "BIBREF66": { |
| "ref_id": "b66", |
| "title": "Cross-lingual latent topic extraction", |
| "authors": [ |
| { |
| "first": "Duo", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Qiaozhu", |
| "middle": [], |
| "last": "Mei", |
| "suffix": "" |
| }, |
| { |
| "first": "Chengxiang", |
| "middle": [], |
| "last": "Zhai", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 48th Annual Meeting of the Association for Computational Linguistics (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "1128--1137", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Duo Zhang, Qiaozhu Mei, and ChengXiang Zhai. 2010. Cross-lingual latent topic extraction. In Pro- ceedings of the 48th Annual Meeting of the Asso- ciation for Computational Linguistics (ACL), pages 1128-1137.", |
| "links": null |
| }, |
| "BIBREF67": { |
| "ref_id": "b67", |
| "title": "Cross language dependency parsing using a bilingual lexicon", |
| "authors": [ |
| { |
| "first": "Hai", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Yan", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| }, |
| { |
| "first": "Chunyu", |
| "middle": [], |
| "last": "Kit", |
| "suffix": "" |
| }, |
| { |
| "first": "Guodong", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the 47th Annual Meeting of the Association for Computational Linguistics (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "55--63", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hai Zhao, Yan Song, Chunyu Kit, and Guodong Zhou. 2009. Cross language dependency parsing using a bilingual lexicon. In Proceedings of the 47th An- nual Meeting of the Association for Computational Linguistics (ACL), pages 55-63.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "The influence of the size of sorted context on the accuracy of word translation in context. The model is Cue+Smoothed-Fusion.", |
| "type_str": "figure", |
| "num": null, |
| "uris": null |
| }, |
| "TABREF0": { |
| "type_str": "table", |
| "html": null, |
| "text": "Example sentences from our IT evaluation dataset with corresponding correct translations.", |
| "content": "<table><tr><td>Spanish</td><td>Italian</td><td>Dutch</td></tr><tr><td>Ambiguous word</td><td>Ambiguous word</td><td>Ambiguous word</td></tr><tr><td>(Possible senses/translations)</td><td>(Possible senses/translations)</td><td>(Possible senses/translations)</td></tr><tr><td>1. estaci\u00f3n</td><td>1. raggio</td><td>1. toren</td></tr><tr><td>(station; season)</td><td>(ray; radius; spoke)</td><td>(rook; tower)</td></tr><tr><td>2. ensayo</td><td>2. accordo</td><td>2. beeld</td></tr><tr><td>(essay; rehearsal; trial)</td><td>(chord; agreement)</td><td>(image; statue)</td></tr><tr><td>3. n\u00facleo</td><td>3. moto</td><td>3. blade</td></tr><tr><td>(core; kernel; nucleus)</td><td>(motion; motorcycle)</td><td>(blade; leaf; magazine)</td></tr><tr><td>4. vela</td><td>4. calcio</td><td>4.fusie</td></tr><tr><td>(sail; candle)</td><td>(calcium; football; stock)</td><td>(fusion; merger)</td></tr><tr><td>5. escudo</td><td>5. terra</td><td>5. stam</td></tr><tr><td>(escudo; escutcheon; shield)</td><td>(earth; land)</td><td>(stem; trunk; tribe)</td></tr><tr><td>6. papa</td><td>6. tavola</td><td>6. koper</td></tr><tr><td>(Pope; potato)</td><td>(board; panel; table)</td><td>(copper; buyer)</td></tr><tr><td>7. cola</td><td>7. campione</td><td>7. bloem</td></tr><tr><td>(glue; coke; tail; queue)</td><td>(champion; sample)</td><td>(flower; flour)</td></tr><tr><td>8. cometa</td><td>8. carta</td><td>8. spanning</td></tr><tr><td>(comet; kite)</td><td>(card; paper; map)</td><td>(voltage; tension; stress)</td></tr><tr><td>9. disco</td><td>9. piano</td><td>9. noot</td></tr><tr><td>(disco; discus; disk)</td><td>(floor; plane; plan; piano)</td><td>(note; nut)</td></tr><tr><td>10. banda</td><td>10. disco</td><td>10. akkoord</td></tr><tr><td>(band; gang; strip)</td><td>(disco; discus; disk)</td><td>(chord; agreement)</td></tr><tr><td>11. cinta</td><td>11. istruzione</td><td>11. munt</td></tr><tr><td>(ribbon; tape)</td><td>(education; instruction)</td><td>(coin; currency; mint)</td></tr><tr><td>12. banco</td><td>12. gabinetto</td><td>12. pool</td></tr><tr><td>(bank; bench; shoal)</td><td>(cabinet; office; toilet)</td><td>(pole; pool)</td></tr><tr><td>13. frente</td><td>13. torre</td><td>13. band</td></tr><tr><td>(forehead; front)</td><td>(rook; tower)</td><td>(band; tyre; tape)</td></tr><tr><td>14. fuga</td><td>14. campo</td><td>14. kern</td></tr><tr><td>(escape; fugue; leak)</td><td>(camp; field)</td><td>(core; kernel; nucleus)</td></tr><tr><td>15. gota</td><td>15. gomma</td><td>15. kop</td></tr><tr><td>(gout; drop)</td><td>(rubber; gum; tyre)</td><td>(cup; head)</td></tr></table>", |
| "num": null |
| }, |
| "TABREF1": { |
| "type_str": "table", |
| "html": null, |
| "text": "Sets of 15 ambiguous words in Spanish, Italian and Dutch from our test set accompanied by the sets of their respective possible senses/translations in English.", |
| "content": "<table/>", |
| "num": null |
| }, |
| "TABREF3": { |
| "type_str": "table", |
| "html": null, |
| "text": "Results on the 3 evaluation datasets. Translation direction is ES/IT/NL\u2192EN.", |
| "content": "<table><tr><td>The improvements</td></tr></table>", |
| "num": null |
| } |
| } |
| } |
| } |