| { |
| "paper_id": "P15-1023", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T09:11:28.510602Z" |
| }, |
| "title": "A Context-Aware Topic Model for Statistical Machine Translation", |
| "authors": [ |
| { |
| "first": "Jinsong", |
| "middle": [], |
| "last": "Su", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Soochow University", |
| "location": { |
| "settlement": "Suzhou", |
| "country": "China" |
| } |
| }, |
| "email": "jssu@xmu.edu.cn" |
| }, |
| { |
| "first": "Deyi", |
| "middle": [], |
| "last": "Xiong", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Xiamen University", |
| "location": { |
| "settlement": "Xiamen", |
| "country": "China" |
| } |
| }, |
| "email": "dyxiong@suda.edu.cn" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Chinese Academy of Sciences", |
| "location": { |
| "settlement": "Beijing", |
| "country": "China" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Xianpei", |
| "middle": [], |
| "last": "Han", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "xianpei@nfs.iscas.ac.cn" |
| }, |
| { |
| "first": "Hongyu", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Soochow University", |
| "location": { |
| "settlement": "Suzhou", |
| "country": "China" |
| } |
| }, |
| "email": "hylin@xmu.edu.cn" |
| }, |
| { |
| "first": "Junfeng", |
| "middle": [], |
| "last": "Yao", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Soochow University", |
| "location": { |
| "settlement": "Suzhou", |
| "country": "China" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Min", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "minzhang@suda.edu.cn" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Lexical selection is crucial for statistical machine translation. Previous studies separately exploit sentence-level contexts and documentlevel topics for lexical selection, neglecting their correlations. In this paper, we propose a context-aware topic model for lexical selection, which not only models local contexts and global topics but also captures their correlations. The model uses target-side translations as hidden variables to connect document topics and source-side local contextual words. In order to learn hidden variables and distributions from data, we introduce a Gibbs sampling algorithm for statistical estimation and inference. A new translation probability based on distributions learned by the model is integrated into a translation system for lexical selection. Experiment results on NIST Chinese-English test sets demonstrate that 1) our model significantly outperforms previous lexical selection methods and 2) modeling correlations between local words and global topics can further improve translation quality.", |
| "pdf_parse": { |
| "paper_id": "P15-1023", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Lexical selection is crucial for statistical machine translation. Previous studies separately exploit sentence-level contexts and documentlevel topics for lexical selection, neglecting their correlations. In this paper, we propose a context-aware topic model for lexical selection, which not only models local contexts and global topics but also captures their correlations. The model uses target-side translations as hidden variables to connect document topics and source-side local contextual words. In order to learn hidden variables and distributions from data, we introduce a Gibbs sampling algorithm for statistical estimation and inference. A new translation probability based on distributions learned by the model is integrated into a translation system for lexical selection. Experiment results on NIST Chinese-English test sets demonstrate that 1) our model significantly outperforms previous lexical selection methods and 2) modeling correlations between local words and global topics can further improve translation quality.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Lexical selection is a very important task in statistical machine translation (SMT). Given a sentence in the source language, lexical selection statistically predicts translations for source words, based on various translation knowledge. Most conventional SMT systems (Koehn et al., 2003; Galley et al., 2006; Chiang, 2007) Figure 1 : A Chinese-English translation example to illustrate the effect of local contexts and global topics as well as their correlations on lexical selection. Each black line indicates a set of translation candidates for a Chinese content word (within a dotted box). Green lines point to translations that are favored by local contexts while blue lines show bidirectional associations between global topics and their consistent target-side translations.", |
| "cite_spans": [ |
| { |
| "start": 268, |
| "end": 288, |
| "text": "(Koehn et al., 2003;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 289, |
| "end": 309, |
| "text": "Galley et al., 2006;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 310, |
| "end": 323, |
| "text": "Chiang, 2007)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 324, |
| "end": 332, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Previous studies that explore richer information for lexical selection can be divided into two categories: 1) incorporating sentence-level contexts (Chan et al., 2007; Carpuat and Wu, 2007; Hasan et al., 2008; Mauser et al., 2009; Shen et al., 2009) or 2) integrating document-level topics (Xiao et al., 2011; Ture et al., 2012; Xiao et al., 2012; Eidelman et al., 2012; Hewavitharana et al., 2013; Xiong et al., 2013; Hasler et al., 2014a; Hasler et al., 2014b) into SMT. The methods in these two strands have shown their effectiveness on lexical selection.", |
| "cite_spans": [ |
| { |
| "start": 148, |
| "end": 167, |
| "text": "(Chan et al., 2007;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 168, |
| "end": 189, |
| "text": "Carpuat and Wu, 2007;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 190, |
| "end": 209, |
| "text": "Hasan et al., 2008;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 210, |
| "end": 230, |
| "text": "Mauser et al., 2009;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 231, |
| "end": 249, |
| "text": "Shen et al., 2009)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 290, |
| "end": 309, |
| "text": "(Xiao et al., 2011;", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 310, |
| "end": 328, |
| "text": "Ture et al., 2012;", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 329, |
| "end": 347, |
| "text": "Xiao et al., 2012;", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 348, |
| "end": 370, |
| "text": "Eidelman et al., 2012;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 371, |
| "end": 398, |
| "text": "Hewavitharana et al., 2013;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 399, |
| "end": 418, |
| "text": "Xiong et al., 2013;", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 419, |
| "end": 440, |
| "text": "Hasler et al., 2014a;", |
| "ref_id": null |
| }, |
| { |
| "start": 441, |
| "end": 462, |
| "text": "Hasler et al., 2014b)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "However, correlations between sentence-and document-level contexts have never been explored before. It is clear that local contexts and global topics are often highly correlated. Consider a Chinese-English translation example presented in Figure 1 . On the one hand, if local contexts suggest that the source word \"\u00e1|/l\u00ecch\u01ceng\" should be translated into \"stance\", they will also indicate that the topic of the document where the example sentence occurs is about politics. The politics topic can be further used to enable the decoder to select a correct translation \"issue\" for another source word \"K /w\u00e8nt\u00ed\", which is consistent with this topic. On the other hand, if we know that this document mainly focuses on the politics topic, the candidate translation \"stance\" will be more compatible with the context of \"\u00e1|/l\u00ecch\u01ceng\" than the candidate translation \"attitude\". This is because neighboring source-side words \"\u00a5I/zh\u014dnggu\u00f3\" and \"\u00a5\u00e1/zh\u014dngl\u00ec\" often occur in documents that are about international politics. We believe that such correlations between local contextual words and global topics can be used to further improve lexical selection.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 239, |
| "end": 247, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we propose a unified framework to jointly model local contexts, global topics as well as their correlations for lexical selection. Specifically,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 First, we present a context-aware topic model (CATM) to exploit the features mentioned above for lexical selection in SMT. To the best of our knowledge, this is the first work to jointly model both local and global contexts for lexical selection in a topic model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 Second, we present a Gibbs sampling algorithm to learn various distributions that are related to topics and translations from data. The translation probabilities derived from our model are integrated into SMT to allow collective lexical selection with both local and global information.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We validate the effectiveness of our model on a state-of-the-art phrase-based translation system. Experiment results on the NIST Chinese-English translation task show that our model significantly outperforms previous lexical selection methods.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this section, we describe basic assumptions and elaborate the proposed context-aware topic model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Context-Aware Topic Model", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In CATM, we assume that each source document d consists of two types of words: topical words which are related to topics of the document and contextual words which affect translation selections of topical words.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Basic Assumptions", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "As topics of a document are usually represented by content words in it, we choose source-side nouns, verbs, adjectives and adverbs as topical words. For contextual words, we use all words in a source sentence as contextual words. We assume that they are generated by target-side translations of other words than themselves. Note that a source word may be both topical and contextual. For each topical word, we identify its candidate translations from training corpus according to word alignments between the source and target language. We allow a target translation to be a phrase of length no more than 3 words. We refer to these translations of source topical words as target-side topical items, which can be either words or phrases. In the example shown in Figure 1 , all source words within dotted boxes are topical words. Topical word \"\u00e1|/l\u00ecch\u01ceng\" is supposed to be translated into a target-side topical item \"stance\", which is collectively suggested by neighboring contextual words \" \u00a5 I/zh\u014dnggu\u00f3\", \"\u00a5 \u00e1/zh\u014dngl\u00ec\" and the topic of the corresponding document.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 760, |
| "end": 769, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Basic Assumptions", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "In our model, all target-side topical items in a document are generated according to the following two assumptions:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Basic Assumptions", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "\u2022 Topic consistency assumption: All target-side topical items in a document should be consistent with the topic distribution of the document. For example, the translations \"issue\", \"stance\" tend to occur in documents about politics topic.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Basic Assumptions", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "\u2022 Context compatibility assumption: For a topical word, its translation (i.e., the counterpart target-side topical item) should be compatible with its neighboring contextual words. For instance, the translation \"stance\" of \"\u00e1|/l\u00ecch\u01ceng\" is closely related to contextual words \"\u00a5I/zh\u014dnggu\u00f3\" and \"\u00a5\u00e1/zh\u014dngl\u00ec\".", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Basic Assumptions", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "The graphical representation of CATM, which visualizes the generative process of training data D, is shown in Figure 2 . Notations of CATM are presented in Table 1 . In CATM, each document d can be generated in the following three steps 1 : ical words \"\u00afK/w\u00e8nt\u00ed\", \"\u00e1|/l\u00ecch\u01ceng\", and contextual word \"\u00a5\u00e1/zh\u014dngl\u00ec\" in the following steps:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 110, |
| "end": 118, |
| "text": "Figure 2", |
| "ref_id": "FIGREF0" |
| }, |
| { |
| "start": 156, |
| "end": 163, |
| "text": "Table 1", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Step 1: The model generates a topic distribution for the corresponding document as {economy 0.25 , politics 0.75 }.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Step 2: Based on the topic distribution, we choose \"economy\" and \"politics\" as topic assignments for \"\u00afK/w\u00e8nt\u00ed\" and \"\u00e1|/l\u00ecch\u01ceng\" respectively; Then, according to the distributions of the two topics over target-side topical items, we generate target-side topical items \"issue\" and \"stance\"; Finally, according to the translation probability distributions of these two topical items over source-side topical words, we generate source-side topical words \"\u00afK/w\u00e8nt\u00ed\" and \"\u00e1|/l\u00ecch\u01ceng\" for them respectively.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Step 3: For the contextual word \"\u00a5\u00e1/zh\u014dngl\u00ec\", we first collect target-side topical items of its neighboring topical words such as \"\u00afK/w\u00e8nt\u00ed\", \" \u00b1/b\u01ceoch\u00ed\" and \"\u00e1 |/l\u00ecch\u01ceng\" to form a target-side topical item set {\"issue\",\"keep\", \"stance\"}, from which we randomly sample one item \"stance\". Next, according to the generation probability distribution of \"stance\" over source contextual words, we finally generate the source contextual word \"\u00a5 \u00e1/zh\u014dngl\u00ec\".", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "In the above generative process, all target-side topical items are generated from the underlying topics of a source document, which guarantees that selected target translations are topic-consistent. Additionally, each source contextual word is derived from a target-side topical item given its generation probability distribution. This makes selected target translations also compatible with source-side local contextual words. In this way, global topics, topical words, local contextual words and target-side topical items are highly correlated in CATM that exactly captures such correlations for lexical selection.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "We propose a Gibbs sampling algorithm to learn various distributions described in the previous section. Details of the learning and inference process are presented in this section.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parameter Estimation and Inference", |
| "sec_num": "3" |
| }, |
| { |
| "text": "According to CATM, the total probability of training data D given hyperparameters \u03b1, \u03b2, \u03b3 and \u03b4 is computed as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Probability of Training Corpus", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "P (D; \u03b1, \u03b2, \u03b3, \u03b4) = d P (f d , c d ; \u03b1, \u03b2, \u03b3, \u03b4) = d \u1ebd d P (\u1ebd d |\u03b1, \u03b2)P (f d |\u1ebd d , \u03b3)P (c d |\u1ebd d , \u03b4) = \u03c6 P (\u03c6|\u03b2) \u03c8 P (\u03c8|\u03b3) d \u1ebd d P (f d |\u1ebd d , \u03c8) \u00d7 \u03be P (\u03be|\u03b4) \u1ebd d P (\u1ebd d |\u1ebd d )p(c d |\u1ebd d , \u03be) \u00d7 \u03b8 P (\u03b8|\u03b1)P (\u1ebd d |\u03b8, \u03c6)d\u03b8d\u03bed\u03c8d\u03c6", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Probability of Training Corpus", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "(1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Probability of Training Corpus", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Probability of Training Corpus", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "f", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Probability of Training Corpus", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The joint distribution in Eq. (1) is intractable to compute because of coupled hyperparameters and hidden variables. Following Han et al, (2012), we adapt the well-known Gibbs sampling algorithm (Griffiths and Steyvers, 2004) to our model. We compute the joint posterior distribution of hidden variables, denoted by P (z,\u1ebd,\u1ebd |D), and then use this distribution to 1) estimate \u03b8, \u03c6, \u03c8 and \u03be, and 2) predict translations and topics of all documents in D. Specifically, we derive the joint posterior distribution from Eq. (1) as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parameter Estimation via Gibbs Sampling", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "P (z,\u1ebd,\u1ebd |D) \u221d P (z)P (\u1ebd|z)P (f|\u1ebd)P (\u1ebd |\u1ebd)P (c|\u1ebd ) (2)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parameter Estimation via Gibbs Sampling", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Based on the equation above, we construct a Markov chain that converges to P (z,\u1ebd,\u1ebd |D), where each state is an assignment of a hidden variable (including topic assignment to a topical word, target-side topical item assignment to a source topical or contextual word.). Then, we sequentially sample each assignment according to the following three conditional assignment distributions: 1. P (z i = z|z \u2212i ,\u1ebd,\u1ebd , D): topic assignment distribution of a topical word given z \u2212i that denotes all topic assignments but z i ,\u1ebd and\u1ebd that are target-side topical item assignments. It is updated as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parameter Estimation via Gibbs Sampling", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P (z i = z|z \u2212i ,\u1ebd,\u1ebd , D) \u221d C DZ (\u2212i)dz + \u03b1 C DZ (\u2212i)d * +N z \u03b1 \u00d7 C Z\u1ebc (\u2212i)z\u1ebd + \u03b2 C Z\u1ebc (\u2212i)z * +N\u1ebd\u03b2", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Parameter Estimation via Gibbs Sampling", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "where the topic assignment to a topical word is determined by the probability that this topic appears in document d (the 1st term) and the probability that the selected item\u1ebd occurs in this topic (the 2nd term).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parameter Estimation via Gibbs Sampling", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "2. P (\u1ebd i =\u1ebd|z,\u1ebd \u2212i ,\u1ebd , D)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parameter Estimation via Gibbs Sampling", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": ": target-side topical item assignment distribution of a source topical word given the current topic assignments z, the current item assignments of all other topical words\u1ebd \u2212i , and the current item assignments of contextual words\u1ebd . It is updated as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parameter Estimation via Gibbs Sampling", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P (\u1ebd i =\u1ebd|z,\u1ebd \u2212i ,\u1ebd , D) \u221d C Z\u1ebc (\u2212i)z\u1ebd + \u03b2 C Z\u1ebc (\u2212i)z * + N\u1ebd\u03b2 \u00d7 C\u1ebc F (\u2212i)\u1ebdf + \u03b3 C\u1ebc F (\u2212i)\u1ebd * + N f \u03b3 \u00d7 ( C W\u1ebc (\u2212i)w\u1ebd + 1 C W\u1ebc (\u2212i)w\u1ebd ) C W\u1ebc w\u1ebd", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "Parameter Estimation via Gibbs Sampling", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "where the target-side topical item assignment to a topical word is determined by the probability that this item is from the topic z (the 1st term), the probability that this item is translated into the topical word f (the 2nd term) and the probability of contextual words within a w s word window centered at the topical word f , which influence the selection of the target-side topical item\u1ebd (the 3rd term). It is very important to note that we use a parallel corpus to train the model. Therefore we directly identify target-side topical items for source topical words via word alignments rather than sampling.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parameter Estimation via Gibbs Sampling", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "3. P (\u1ebd i =\u1ebd|z,\u1ebd,\u1ebd \u2212i , D): target-side topical item assignment distribution for a contextual word given the current topic assignments z, the current item assignments of topical words\u1ebd, and the current item assignments of all other contextual words\u1ebd \u2212i . It is updated as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parameter Estimation via Gibbs Sampling", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P (\u1ebd i =\u1ebd|z,\u1ebd,\u1ebd \u2212i , D) \u221d C W\u1ebc w\u1ebd C W\u1ebc w * \u00d7 C\u1ebc C (\u2212i)\u1ebdc + \u03b4 C\u1ebc C (\u2212i)\u1ebd * + N c \u03b4", |
| "eq_num": "(5)" |
| } |
| ], |
| "section": "Parameter Estimation via Gibbs Sampling", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "where the target-side topical item assignment used to generate a contextual word is determined by the probability of this item being assigned to generate contextual words within a surface window of size w s (the 1st term) and the probability that contextual words occur in the context of this item (the 2nd term).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parameter Estimation via Gibbs Sampling", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "In all above formulas, C DZ dz is the number of times that topic z has been assigned for all topical words in document d, C DZ d * = z C DZ dz is the topic number in document d, and", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parameter Estimation via Gibbs Sampling", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "C Z\u1ebc z\u1ebd , C\u1ebc F ef , C W\u1ebc w\u1ebd , C W\u1ebc", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parameter Estimation via Gibbs Sampling", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "w\u1ebd and C\u1ebc C ec have similar explanations. Based on the above marginal distributions, we iteratively update all assignments of corpus D until the constructed Markov chain converges. Model parameters are estimated using these final assignments.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parameter Estimation via Gibbs Sampling", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "For a new document, we first predict its topics and target-side topical items using the incremental Gibbs sampling algorithm described in (Kataria et al., 2011) . In this algorithm, we iteratively update topic assignments and translation assignments of an unseen document following the same process described in Section 3.2, but with estimated model parameters.", |
| "cite_spans": [ |
| { |
| "start": 138, |
| "end": 160, |
| "text": "(Kataria et al., 2011)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inference on Unseen Documents", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Once we obtain these assignments, we estimate lexical translation probabilities based on the sampled counts of target-side topical items. Formally, for the position i in the document corresponding to the content word f , we collect the sampled count that translation\u1ebd generates f , denoted by C sam (\u1ebd, f ). This count can be normalized to form a new translation probability in the following way:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inference on Unseen Documents", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "p(\u1ebd|f ) = C sam (\u1ebd, f ) + k C sam + k \u2022 N\u1ebd ,f", |
| "eq_num": "(6)" |
| } |
| ], |
| "section": "Inference on Unseen Documents", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "where C sam is the total number of samples during inference and N\u1ebd ,f is the number of candidate translations of f . Here we apply add-k smoothing to refine this translation probability, where k is a tunable global smoothing constant. Under the framework of log-linear model (Och and Ney, 2002) , we use this translation probability as a new feature to improve lexical selection in SMT.", |
| "cite_spans": [ |
| { |
| "start": 275, |
| "end": 294, |
| "text": "(Och and Ney, 2002)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inference on Unseen Documents", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "In order to examine the effectiveness of our model, we carried out several groups of experiments on Chinese-to-English translation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Our bilingual training corpus is from the FBIS corpus and the Hansards part of LDC2004T07 corpus (1M parallel sentences, 54.6K documents, with 25.2M Chinese words and 29M English words). We first used ZPar toolkit 2 and Stanford toolkit 3 to preprocess (i.e., word segmenting, PoS tagging) the Chinese and English parts of training corpus, and then word-aligned them using GIZA++ (Och and Ney, 2003) with the option \"grow-diag-final-and\". We chose the NIST evaluation set of MT05 as the development set, and the sets of MT06/MT08 as test sets. On average, these three sets contain 17.2, 13.9 and 14.1 content words per sentence, respectively. We trained a 5-gram language model on the Xinhua portion of Gigaword corpus using the SRILM Toolkit (Stolcke, 2002 ).", |
| "cite_spans": [ |
| { |
| "start": 380, |
| "end": 399, |
| "text": "(Och and Ney, 2003)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 743, |
| "end": 757, |
| "text": "(Stolcke, 2002", |
| "ref_id": "BIBREF30" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Setup", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Our baseline system is a state-of-the-art SMT system, which adapts bracketing transduction grammars (Wu, 1997) to phrasal translation and equips itself with a maximum entropy based reordering model (MEBTG) (Xiong et al., 2006) . We used the toolkit 4 developed by Zhang (2004) to train the reordering model with the following parameters: iteration number iter=200 and Gaussian prior g=1.0. During decoding, we set the ttable-limit as 20, the stack-size as 100. The translation quality is evaluated by case-insensitive BLEU-4 (Papineni et al., 2002) metric. Finally, we conducted paired bootstrap sampling (Koehn, 2004) to test the significance in BLEU score differences. To train CATM, we set the topic number N z as 25. 5 For hyperparameters \u03b1 and \u03b2, we empirically set \u03b1=50/N z and \u03b2=0.1, as implemented in (Griffiths and Steyvers, 2004) . Following Han et al. (2012) , we set \u03b3 and \u03b4 as 1.0/N f and 2000/N c , respectively. During the training process, we ran 400 iterations of the Gibbs sampling algorithm. For documents to be translated, we first ran 300 rounds in a burn-in step to let the probability distributions converge, and then ran 1500 rounds where we collected independent samples every 5 rounds. The longest training time of CATM is less than four days on our server using 4GB RAM and one core of 3.2GHz CPU. As for the smoothing constant k in Eq. 6, we set its values to 0.5 according to the performance on the development set in additional experiments.", |
| "cite_spans": [ |
| { |
| "start": 100, |
| "end": 110, |
| "text": "(Wu, 1997)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 206, |
| "end": 226, |
| "text": "(Xiong et al., 2006)", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 264, |
| "end": 276, |
| "text": "Zhang (2004)", |
| "ref_id": null |
| }, |
| { |
| "start": 525, |
| "end": 548, |
| "text": "(Papineni et al., 2002)", |
| "ref_id": null |
| }, |
| { |
| "start": 605, |
| "end": 618, |
| "text": "(Koehn, 2004)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 809, |
| "end": 839, |
| "text": "(Griffiths and Steyvers, 2004)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 852, |
| "end": 869, |
| "text": "Han et al. (2012)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Setup", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Our first group of experiments were conducted on the development set to investigate the impact of the window size w s . We gradually varied window size from 6 to 14 with an increment of 2.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Impact of Window Size w s", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Experiment results are shown in Table 2 . We achieve the best performance when w s =12. This suggests that a ?12-word window context is sufficient for predicting target-side translations for ambiguous source-side topical words. We therefore set w s =12 for all experiments thereafter.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 32, |
| "end": 39, |
| "text": "Table 2", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Impact of Window Size w s", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "In the second group of experiments, in addition to the conventional MEBTG system, we also compared CATM with the following two models:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Overall Performance", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Word Sense Disambiguation Model (WSDM) (Chan et al., 2007) . This model improves lexical selection in SMT by exploiting local contexts. For each content word, we construct a MaxEnt-based classifier incorporating local collocation and surrounding word features, which are also adopted by Chan et al. (2007) . For each candidate translatio\u00f1 e of topical word f , we use WSDM to estimate the context-specific translation probability P (\u1ebd|f ), which is used as a new feature in SMT system.", |
| "cite_spans": [ |
| { |
| "start": 39, |
| "end": 58, |
| "text": "(Chan et al., 2007)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 287, |
| "end": 305, |
| "text": "Chan et al. (2007)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Overall Performance", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Topic-specific Lexicon Translation Model (TLTM) (Zhao and Xing, 2007) . This model focuses on the utilization of document-level context. We adapted it to estimate a lexicon translation probability as follows:", |
| "cite_spans": [ |
| { |
| "start": 48, |
| "end": 69, |
| "text": "(Zhao and Xing, 2007)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Overall Performance", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "p(f |\u1ebd, d) \u221d p(\u1ebd|f, d) \u2022 p(f |d) = z p(\u1ebd|f, z) \u2022 p(f |z) \u2022 p(z|d)", |
| "eq_num": "(7)" |
| } |
| ], |
| "section": "Overall Performance", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "where p(\u1ebd|f, z) is the lexical translation probability conditioned on topic z, which can be calculated according to the principle of maximal likelihood, p(f |z) is the generation probability of word f from topic z, and p(z|d) denotes the posterior topic distribution of document d.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Overall Performance", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Note that our CATM is proposed for lexical selection on content words. To show the strong effectiveness of our model, we also compared it against the full-fledged variants of the above-mentioned two models that are built for all source words. We refer to them as WSDM (All) and TLTM (All), respectively. Table 3 displays BLEU scores of different lexical selection models. All models outperform the baseline. Although we only use CATM to predict translations for content words, CATM achieves an average BLEU score of 26.77 on the two test sets, which is higher than that of the baseline by 1.18 BLEU points. This improvement is statistically significant at p<0.01. Furthermore, we also find that our model performs better than WSDM and TLTM with significant improvements. Finally, even if WSDM (All) and TLTM (all) are built for all source words, they are still no better than than CATM that selects desirable translations for content words. These experiment results strongly demonstrate the advantage of CATM over previous lexical selection models.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 304, |
| "end": 311, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Overall Performance", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "In order to investigate why CATM is able to outperform previous models that explore only local contex- Table 3 : Experiment results on the test sets. Avg = average BLEU scores. WSDM (All) and TLTM (All) are models built for all source words. \u2193: significantly worse than CATM (p<0.05), \u2193\u2193: significantly worse than CATM (p<0.01) .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 103, |
| "end": 110, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Analysis", |
| "sec_num": "5" |
| }, |
| { |
| "text": "tual words or global topics, we take a deep look into topics, topical items and contextual words learned by CATM and empirically analyze the effect of modeling correlations between local contextual words and global topics on lexical selection.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We present some examples of topics learned by CATM in Table 4 . We also list five target-side topical items with the highest probabilities for each topic, and the most probable five contextual words for each target-side topical item. These examples clearly show that target-side topical items tightly connect global topics and local contextual words by capturing their correlations.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 54, |
| "end": 61, |
| "text": "Table 4", |
| "ref_id": "TABREF9" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Outputs of CATM", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Compared to previous lexical selection models, CATM jointly models both local contextual words and global topics. Such a joint modeling also enables CATM to capture their inner correlations at the model level. In order to examine the effect of correlation modeling on lexical selection, we compared CATM with its three variants: CATM (Context) that only uses local context information. We determined target-side topical items for content words in this variant by setting the probability distribution that a topic generates a target-side topical item to be uniform; CATM (Topic) that explores only global topic information. We identified target-side topical items for content words in the model by setting w s as 0, i.e., no local contextual words being used at all.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Effect of Correlation Modeling", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "CATM (Log-linear) is the combination of the above-mentioned two variants ( and ) in a log-linear manner, which does not capture correlations between local contextual words and global topics at the model level. Results in Table 5 show that CATM performs significantlly better than both CATM (Topic) and CAT-M (Context). Even compared with CATM (Loglinear), CATM still achieves a significant improvement of 0.35 BLEU points (p<0.05). This validates the effectiveness of capturing correlations for lexical selection at the model level.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 221, |
| "end": 228, |
| "text": "Table 5", |
| "ref_id": "TABREF7" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Effect of Correlation Modeling", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Our work is partially inspired by (Han and Sun, 2012) , where an entity-topic model is presented for entity linking. We successfully adapt this work to lexical selection in SMT. The related work mainly includes the following two strands.", |
| "cite_spans": [ |
| { |
| "start": 34, |
| "end": 53, |
| "text": "(Han and Sun, 2012)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "(1) Lexical Selection in SMT. In order to explore rich context information for lexical selection, some researchers propose trigger-based lexicon models to capture long-distance dependencies (Hasan et al., 2008; Mauser et al., 2009) , and many more researchers build classifiers to select desirable translations during decoding (Chan et al., 2007; Carpuat and Wu, 2007; . Along this line, Shen et al. (2009) introduce four new linguistic and contextual features for translation selection in SMT. Recently, we have witnessed an increasing efforts in exploiting document-level context information to improve lexical selection. Xiao et al. (2011) : Examples of topics, topical items and contextual words learned by CATM with N z =25 and W s =12. Chinese words that do not have direct English translations are denoted with \"*\". Here \"q\" and \"|\" are Chinese quantifiers for missile and war, respectively; \"\u00fc\" and \"W\" together means cross-starit.", |
| "cite_spans": [ |
| { |
| "start": 190, |
| "end": 210, |
| "text": "(Hasan et al., 2008;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 211, |
| "end": 231, |
| "text": "Mauser et al., 2009)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 327, |
| "end": 346, |
| "text": "(Chan et al., 2007;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 347, |
| "end": 368, |
| "text": "Carpuat and Wu, 2007;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 388, |
| "end": 406, |
| "text": "Shen et al. (2009)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 624, |
| "end": 642, |
| "text": "Xiao et al. (2011)", |
| "ref_id": "BIBREF35" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "the document-level translation consistency. Ture et al. (2012) soften this consistency constraint by integrating three counting features into decoder. Also relevant is the work of Xiong et al.(2013) , who use three different models to capture lexical cohesion for document-level SMT.", |
| "cite_spans": [ |
| { |
| "start": 180, |
| "end": 198, |
| "text": "Xiong et al.(2013)", |
| "ref_id": "BIBREF38" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "(2) SMT with Topic Models. In this strand, Xing (2006, 2007) first present a bilingual topical admixture formalism for word alignment in SMT. Tam et al. (2007) and Ruiz et al. (2012) apply topic model into language model adaptation. Su et al. (2012) conduct translation model adaptation with monolingual topic information. Gong et al. (2010) and Xiao et al. (2012) introduce topic-based similarity models to improve SMT system. Axelrod et al. (2012) build topic-specific translation models from the TED corpus and select topic-relevant data from the UN corpus to improve coverage. Eidelman et al. (2012) incorporate topic-specific lexical weights into translation model. Hewavitharana et al. (2013) propose an incremental topic based translation model adaptation approach that satisfies the causality constraint imposed by spoken conversations. present a new bilingual variant of LDA to compute topic-adapted, probabilistic phrase translation features. They also use a topic model to learn latent distributional representations of different context levels of a phrase pair (Hasler et al., 2014b) .", |
| "cite_spans": [ |
| { |
| "start": 43, |
| "end": 60, |
| "text": "Xing (2006, 2007)", |
| "ref_id": null |
| }, |
| { |
| "start": 142, |
| "end": 159, |
| "text": "Tam et al. (2007)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 164, |
| "end": 182, |
| "text": "Ruiz et al. (2012)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 233, |
| "end": 249, |
| "text": "Su et al. (2012)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 323, |
| "end": 341, |
| "text": "Gong et al. (2010)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 346, |
| "end": 364, |
| "text": "Xiao et al. (2012)", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 428, |
| "end": 449, |
| "text": "Axelrod et al. (2012)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 581, |
| "end": 603, |
| "text": "Eidelman et al. (2012)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 671, |
| "end": 698, |
| "text": "Hewavitharana et al. (2013)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 1073, |
| "end": 1095, |
| "text": "(Hasler et al., 2014b)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "In the studies mentioned above, those by Zhao and Xing (2006) , Zhao and Xing (2007) , Hasler et al. (2014a) , and Hasler et al. (2014b) are most related to our work. However, they all perform dynamic translation model adaptation with topic models. Significantly different from them, we propose a new topic model that exploits both local contextual words and global topics for lexical selection. To the best of our knowledge, this is first attempt to capture correlations between local words and global topics for better lexical selection at the model level.", |
| "cite_spans": [ |
| { |
| "start": 41, |
| "end": 61, |
| "text": "Zhao and Xing (2006)", |
| "ref_id": null |
| }, |
| { |
| "start": 64, |
| "end": 84, |
| "text": "Zhao and Xing (2007)", |
| "ref_id": null |
| }, |
| { |
| "start": 87, |
| "end": 108, |
| "text": "Hasler et al. (2014a)", |
| "ref_id": null |
| }, |
| { |
| "start": 111, |
| "end": 136, |
| "text": "and Hasler et al. (2014b)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "This paper has presented a novel context-aware topic model for lexical selection in SMT. Jointly modeling local contexts, global topics and their correlations in a unified framework, our model provides an effective way to capture context information at different levels for better lexical selection in SMT. Experiment results not only demonstrate the effectiveness of the proposed topic model, but also show that lexical selection benefits from correlation modeling.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "7" |
| }, |
| { |
| "text": "In the future, we want to extend our model from the word level to the phrase level. We also plan to improve our model with monolingual corpora.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "7" |
| }, |
| { |
| "text": "In the following description, Dir(.), M ult(.) and U nif (.) denote Dirichlet, Multinomial and Uniform distributions, re-", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "http://people.sutd.edu.sg/\u223cyue zhang/doc/index.html 3 http://nlp.stanford.edu/software 4 http://homepages.inf.ed.ac.uk/lzhang10/maxenttoolkit.html", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We try different topic numbers from 25 to 100 with an increment of 25 each time. We find that Nz=25 produces a slightly better performance than other values on the development set.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "The authors were supported by National Natural Science Foundation of China (Grant Nos 61303082 (Grant No. 1301021018). We also thank the anonymous reviewers for their insightful comments.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 75, |
| "end": 94, |
| "text": "(Grant Nos 61303082", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "New methods and evaluation experiments on translating TED talks in the I-WSLT benchmark", |
| "authors": [ |
| { |
| "first": "Amittai", |
| "middle": [], |
| "last": "Axelrod", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodong", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Deng", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Acero", |
| "suffix": "" |
| }, |
| { |
| "first": "Mei-Yuh", |
| "middle": [], |
| "last": "Hwang", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proc. of ICASSP 2012", |
| "volume": "", |
| "issue": "", |
| "pages": "4945--4648", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Amittai Axelrod, Xiaodong He, Li Deng, Alex Acero, and Mei-Yuh Hwang. 2012. New methods and eval- uation experiments on translating TED talks in the I- WSLT benchmark. In Proc. of ICASSP 2012, pages 4945-4648.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "A Semantic Feature for Statistical Machine Translation", |
| "authors": [ |
| { |
| "first": "Rafael", |
| "middle": [ |
| "E" |
| ], |
| "last": "Banchs", |
| "suffix": "" |
| }, |
| { |
| "first": "Marta", |
| "middle": [ |
| "R" |
| ], |
| "last": "Costa-Juss\u00e0", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proc. of SSSST-5 2011", |
| "volume": "", |
| "issue": "", |
| "pages": "126--134", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rafael E. Banchs and Marta R. Costa-juss\u00e0. 2011. A Semantic Feature for Statistical Machine Translation. In Proc. of SSSST-5 2011, pages 126-134.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Latent Dirichlet Allocation. Journal of Machine Learning", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [ |
| "M" |
| ], |
| "last": "Blei", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "993--1022", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David M. Blei. 2003. Latent Dirichlet Allocation. Jour- nal of Machine Learning, pages 993-1022.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Improving Statistical Machine Translation Using Word Sense Disambiguation", |
| "authors": [ |
| { |
| "first": "Marine", |
| "middle": [], |
| "last": "Carpuat", |
| "suffix": "" |
| }, |
| { |
| "first": "Dekai", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proc. of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "61--72", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marine Carpuat and Dekai Wu. 2007. Improving Statis- tical Machine Translation Using Word Sense Disam- biguation. In Proc. of EMNLP 2007, pages 61-72.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Word Sense Disambiguation Improves Statistical Machine Translation", |
| "authors": [ |
| { |
| "first": "Yee", |
| "middle": [], |
| "last": "Seng Chan", |
| "suffix": "" |
| }, |
| { |
| "first": "Hwee Tou", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Chiang", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proc. of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "33--40", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yee Seng Chan, Hwee Tou Ng, and David Chiang. 2007. Word Sense Disambiguation Improves Statistical Ma- chine Translation. In Proc. of ACL 2007, pages 33-40.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Hierarchical Phrase-Based Translation", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Chiang", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "201--228", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Chiang. 2007. Hierarchical Phrase-Based Trans- lation. Computational Linguistics, pages 201-228.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Better Hypothesis Testing for Statistical Machine Translation: Controlling for Optimizer Instability", |
| "authors": [ |
| { |
| "first": "Jonathan", |
| "middle": [ |
| "H" |
| ], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Alon", |
| "middle": [], |
| "last": "Lavie", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proc. of ACL 2011, short papers", |
| "volume": "", |
| "issue": "", |
| "pages": "176--181", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jonathan H. Clark, Chris Dyer, Alon Lavie, and Noah A. Smith. 2011. Better Hypothesis Testing for Statis- tical Machine Translation: Controlling for Optimizer Instability. In Proc. of ACL 2011, short papers, pages 176-181.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Translation Quality Using Ngram Cooccurrence Statistics", |
| "authors": [ |
| { |
| "first": "George", |
| "middle": [], |
| "last": "Doddington", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proc. of HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "138--145", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "George Doddington. 2002. Translation Quality Using N- gram Cooccurrence Statistics. In Proc. of HLT 2002, 138-145.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Topic Models for Dynamic Translation Model Adaptation", |
| "authors": [ |
| { |
| "first": "Vladimir", |
| "middle": [], |
| "last": "Eidelman", |
| "suffix": "" |
| }, |
| { |
| "first": "Jordan", |
| "middle": [], |
| "last": "Boyd-Graber", |
| "suffix": "" |
| }, |
| { |
| "first": "Philip", |
| "middle": [], |
| "last": "Resnik", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proc. of ACL 2012, Short Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "115--119", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vladimir Eidelman, Jordan Boyd-Graber, and Philip Resnik. 2012. Topic Models for Dynamic Transla- tion Model Adaptation. In Proc. of ACL 2012, Short Papers, pages 115-119.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Scalable Inference and Training of Context-Rich Syntactic Translation Models", |
| "authors": [ |
| { |
| "first": "Michel", |
| "middle": [], |
| "last": "Galley", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "Graehl", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Knight", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Marcu", |
| "suffix": "" |
| }, |
| { |
| "first": "Steve", |
| "middle": [], |
| "last": "Deneefe", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Ignacio", |
| "middle": [], |
| "last": "Thayer", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proc. of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "961--968", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michel Galley, Jonathan Graehl, Kevin Knight, Daniel Marcu, Steve DeNeefe, Wei Wang, and Ignacio Thay- er. 2006. Scalable Inference and Training of Context- Rich Syntactic Translation Models. In Proc. of ACL 2006, pages 961-968.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Improve SMT with Source-side Topic-Document Distributions", |
| "authors": [ |
| { |
| "first": "Zhengxian", |
| "middle": [], |
| "last": "Gong", |
| "suffix": "" |
| }, |
| { |
| "first": "Guodong", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proc. of SUMMIT", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhengxian Gong and Guodong Zhou. 2010. Improve SMT with Source-side Topic-Document Distributions. In Proc. of SUMMIT 2010.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Finding Scientific Topics", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Thomas", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Griffiths", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Steyvers", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proc. of the National Academy of Sciences", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas L. Griffiths and Mark Steyvers. 2004. Finding Scientific Topics. In Proc. of the National Academy of Sciences 2004.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "An Entity-Topic Model for Entity Linking", |
| "authors": [ |
| { |
| "first": "Xianpei", |
| "middle": [], |
| "last": "Han", |
| "suffix": "" |
| }, |
| { |
| "first": "Le", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proc. of EMNLP 2012", |
| "volume": "", |
| "issue": "", |
| "pages": "105--115", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xianpei Han and Le Sun. 2012. An Entity-Topic Model for Entity Linking. In Proc. of EMNLP 2012, pages 105-115.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Triplet Lexicon Models for Statistical Machine Translation", |
| "authors": [ |
| { |
| "first": "Sa\u0161a", |
| "middle": [], |
| "last": "Hasan", |
| "suffix": "" |
| }, |
| { |
| "first": "Juri", |
| "middle": [], |
| "last": "Ganitkevitch", |
| "suffix": "" |
| }, |
| { |
| "first": "Hermann", |
| "middle": [], |
| "last": "Ney", |
| "suffix": "" |
| }, |
| { |
| "first": "Jes\u00fas", |
| "middle": [], |
| "last": "Andr\u00e9s-Ferrer", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proc. of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "372--381", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sa\u0161a Hasan, Juri Ganitkevitch, Hermann Ney, and Jes\u00fas Andr\u00e9s-Ferrer 2008. Triplet Lexicon Models for S- tatistical Machine Translation. In Proc. of EMNLP 2008, pages 372-381.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Dynamic Topic Adaptation for Phrase-based MT", |
| "authors": [ |
| { |
| "first": "Eva", |
| "middle": [], |
| "last": "Hasler", |
| "suffix": "" |
| }, |
| { |
| "first": "Phil", |
| "middle": [], |
| "last": "Blunsom", |
| "suffix": "" |
| }, |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Barry", |
| "middle": [], |
| "last": "Haddow", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proc. of EACL", |
| "volume": "", |
| "issue": "", |
| "pages": "328--337", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eva Hasler, Phil Blunsom, Philipp Koehn, and Bar- ry Haddow. 2014. Dynamic Topic Adaptation for Phrase-based MT. In Proc. of EACL 2014, pages 328- 337.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Dynamic Topic Adaptation for SMT using Distributional Profiles", |
| "authors": [ |
| { |
| "first": "Eva", |
| "middle": [], |
| "last": "Hasler", |
| "suffix": "" |
| }, |
| { |
| "first": "Phil", |
| "middle": [], |
| "last": "Blunsom", |
| "suffix": "" |
| }, |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Barry", |
| "middle": [], |
| "last": "Haddow", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proc. of WMT", |
| "volume": "", |
| "issue": "", |
| "pages": "445--456", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eva Hasler, Phil Blunsom, Philipp Koehn, and Barry Haddow. 2014. Dynamic Topic Adaptation for SMT using Distributional Profiles. In Proc. of WMT 2014, pages 445-456.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Improving Statistical Machine Translation using Lexicalized Rule Selection", |
| "authors": [ |
| { |
| "first": "Zhongjun", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Qun", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Shouxun", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proc. of COLING", |
| "volume": "", |
| "issue": "", |
| "pages": "321--328", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhongjun He, Qun Liu, and Shouxun Lin. 2008. Improv- ing Statistical Machine Translation using Lexicalized Rule Selection. In Proc. of COLING 2008, pages 321- 328.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Incremental Topic-based TM Adaptation for Conversational SLT", |
| "authors": [ |
| { |
| "first": "Sanjika", |
| "middle": [], |
| "last": "Hewavitharana", |
| "suffix": "" |
| }, |
| { |
| "first": "Dennis", |
| "middle": [], |
| "last": "Mehay", |
| "suffix": "" |
| }, |
| { |
| "first": "Sankaranarayanan", |
| "middle": [], |
| "last": "Ananthakrishnan", |
| "suffix": "" |
| }, |
| { |
| "first": "Prem", |
| "middle": [], |
| "last": "Natarajan", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proc. of ACL 2013, Short Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "697--701", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sanjika Hewavitharana, Dennis Mehay, Sankara- narayanan Ananthakrishnan, and Prem Natarajan. 2013. Incremental Topic-based TM Adaptation for Conversational SLT. In Proc. of ACL 2013, Short Papers, pages 697-701.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Entity Disambiguation with Hierarchical Topic Models", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Saurabh", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Kataria", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Krishnan", |
| "suffix": "" |
| }, |
| { |
| "first": "Rajeev", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Rastogi", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proc. of KDD", |
| "volume": "", |
| "issue": "", |
| "pages": "1037--1045", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Saurabh S. Kataria, Krishnan S. Kumar, and Rajeev Ras- togi. 2011. Entity Disambiguation with Hierarchical Topic Models. In Proc. of KDD 2011, pages 1037- 1045.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Statistical Phrase-based Translation", |
| "authors": [ |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Franz", |
| "middle": [ |
| "Josef" |
| ], |
| "last": "Och", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Marcu", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proc. of NAACL-HLT 2003", |
| "volume": "", |
| "issue": "", |
| "pages": "127--133", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philipp Koehn, Franz Josef Och, and Daniel Marcu. 2003. Statistical Phrase-based Translation. In Proc. of NAACL-HLT 2003, pages 127-133.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Statistical Significance Tests for Machine Translation Evaluation", |
| "authors": [ |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proc. of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "388--395", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philipp Koehn. 2004. Statistical Significance Tests for Machine Translation Evaluation. In Proc. of EMNLP 2004, pages 388-395.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Maximum Entropy based Rule Selection Model for Syntax-based Statistical Machine Translation", |
| "authors": [ |
| { |
| "first": "Qun", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhongjun", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Shouxun", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proc. of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "89--97", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Qun Liu, Zhongjun He, Yang Liu, and Shouxun Lin. 2008. Maximum Entropy based Rule Selection Model for Syntax-based Statistical Machine Translation. In Proc. of EMNLP 2008, pages 89-97.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Extending Statistical Machine Translation with Discriminative and Trigger-based Lexicon Models", |
| "authors": [ |
| { |
| "first": "Arne", |
| "middle": [], |
| "last": "Mauser", |
| "suffix": "" |
| }, |
| { |
| "first": "Sa\u0161a", |
| "middle": [], |
| "last": "Hasan", |
| "suffix": "" |
| }, |
| { |
| "first": "Hermann", |
| "middle": [], |
| "last": "Ney", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proc. of EMNLP 2009", |
| "volume": "", |
| "issue": "", |
| "pages": "210--218", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Arne Mauser, Sa\u0161a Hasan, and Hermann Ney. 2009. Ex- tending Statistical Machine Translation with Discrim- inative and Trigger-based Lexicon Models. In Proc. of EMNLP 2009, pages 210-218.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Discriminative Training and Maximum Entropy Models for Statistical Machine Translation", |
| "authors": [ |
| { |
| "first": "Joseph", |
| "middle": [], |
| "last": "Franz", |
| "suffix": "" |
| }, |
| { |
| "first": "Hermann", |
| "middle": [], |
| "last": "Och", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ney", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proc. of ACL 2002", |
| "volume": "", |
| "issue": "", |
| "pages": "295--302", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Franz Joseph Och and Hermann Ney. 2002. Discrimi- native Training and Maximum Entropy Models for S- tatistical Machine Translation. In Proc. of ACL 2002, pages 295-302.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "A Systematic Comparison of Various Statistical Alignment Models", |
| "authors": [ |
| { |
| "first": "Joseph", |
| "middle": [], |
| "last": "Franz", |
| "suffix": "" |
| }, |
| { |
| "first": "Hermann", |
| "middle": [], |
| "last": "Och", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ney", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Computational Linguistics", |
| "volume": "", |
| "issue": "29", |
| "pages": "19--51", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Franz Joseph Och and Hermann Ney. 2003. A Systemat- ic Comparison of Various Statistical Alignment Mod- els. Computational Linguistics, 2003(29), pages 19- 51.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Minimum Error Rate Training in Statistical Machine Translation", |
| "authors": [ |
| { |
| "first": "Franz Josef", |
| "middle": [], |
| "last": "Och", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proc. of ACL 2003", |
| "volume": "", |
| "issue": "", |
| "pages": "160--167", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Franz Josef Och. 2003. Minimum Error Rate Training in Statistical Machine Translation. In Proc. of ACL 2003, pages 160-167.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "The Alignment Template Approach to Statistical Machine Translation", |
| "authors": [ |
| { |
| "first": "Joseph", |
| "middle": [], |
| "last": "Franz", |
| "suffix": "" |
| }, |
| { |
| "first": "Hermann", |
| "middle": [], |
| "last": "Och", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ney", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Computational Linguistics", |
| "volume": "", |
| "issue": "30", |
| "pages": "417--449", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Franz Joseph Och and Hermann Ney. 2004. The Align- ment Template Approach to Statistical Machine Trans- lation. Computational Linguistics, 2004(30), pages 417-449.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "BLEU: A Method for Automatic Evaluation of Machine Translation", |
| "authors": [ |
| { |
| "first": "Kishore", |
| "middle": [], |
| "last": "Papineni", |
| "suffix": "" |
| }, |
| { |
| "first": "Salim", |
| "middle": [], |
| "last": "Roukos", |
| "suffix": "" |
| }, |
| { |
| "first": "Todd", |
| "middle": [], |
| "last": "Ward", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei-Jing", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proc. of ACL 2002", |
| "volume": "", |
| "issue": "", |
| "pages": "311--318", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2007. BLEU: A Method for Automatic Evaluation of Machine Translation. In Proc. of ACL 2002, pages 311-318.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Topic Adaptation for Lecture Translation through Bilingual Latent Semantic Models", |
| "authors": [ |
| { |
| "first": "Nick", |
| "middle": [], |
| "last": "Ruiz", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcello", |
| "middle": [], |
| "last": "Federico", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proc. of the Sixth Workshop on Statistical Machine Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "294--302", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nick Ruiz and Marcello Federico. 2012. Topic Adapta- tion for Lecture Translation through Bilingual Latent Semantic Models. In Proc. of the Sixth Workshop on Statistical Machine Translation, pages 294-302.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Effective Use of Linguistic and Contextual Information for Statistical Machine Translation", |
| "authors": [ |
| { |
| "first": "Libin", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Jinxi", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Spyros", |
| "middle": [], |
| "last": "Matsoukas", |
| "suffix": "" |
| }, |
| { |
| "first": "Ralph", |
| "middle": [], |
| "last": "Weischedel", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proc. of EMNLP 2009", |
| "volume": "", |
| "issue": "", |
| "pages": "72--80", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Libin Shen, Jinxi Xu, Bing Zhang, Spyros Matsoukas, and Ralph Weischedel. 2009. Effective Use of Lin- guistic and Contextual Information for Statistical Ma- chine Translation. In Proc. of EMNLP 2009, pages 72-80.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Srilm -An Extensible Language Modeling Toolkit", |
| "authors": [ |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Stolcke", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proc. of ICSLP 2002", |
| "volume": "", |
| "issue": "", |
| "pages": "901--904", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andreas Stolcke. 2002. Srilm -An Extensible Language Modeling Toolkit. In Proc. of ICSLP 2002, pages 901- 904.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Translation Model Adaptation for Statistical Machine Translation with Monolingual Topic Information", |
| "authors": [ |
| { |
| "first": "Jinsong", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "Hua", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Haifeng", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yidong", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodong", |
| "middle": [], |
| "last": "Shi", |
| "suffix": "" |
| }, |
| { |
| "first": "Huailin", |
| "middle": [], |
| "last": "Dong", |
| "suffix": "" |
| }, |
| { |
| "first": "Qun", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proc. of ACL 2012", |
| "volume": "", |
| "issue": "", |
| "pages": "459--468", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jinsong Su, Hua Wu, Haifeng Wang, Yidong Chen, Xi- aodong Shi, Huailin Dong, and Qun Liu. 2012. Trans- lation Model Adaptation for Statistical Machine Trans- lation with Monolingual Topic Information. In Proc. of ACL 2012, pages 459-468.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Bilingual LSA-based adaptation for statistical machine translation", |
| "authors": [ |
| { |
| "first": "Yik-Cheung", |
| "middle": [], |
| "last": "Tam", |
| "suffix": "" |
| }, |
| { |
| "first": "Ian", |
| "middle": [ |
| "R" |
| ], |
| "last": "Lane", |
| "suffix": "" |
| }, |
| { |
| "first": "Tanja", |
| "middle": [], |
| "last": "Schultz", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Machine Translation", |
| "volume": "21", |
| "issue": "4", |
| "pages": "187--207", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yik-Cheung Tam, Ian R. Lane, and Tanja Schultz. 2007. Bilingual LSA-based adaptation for statistical machine translation. Machine Translation, 21(4), pages 187- 207.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Encouraging Consistent Translation Choices", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ferhan Ture", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Douglasw", |
| "suffix": "" |
| }, |
| { |
| "first": "Philip", |
| "middle": [], |
| "last": "Oard", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Resnik", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proc. of NAACL-HLT 2012", |
| "volume": "", |
| "issue": "", |
| "pages": "417--426", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ferhan Ture, DouglasW. Oard, and Philip Resnik. 2012. Encouraging Consistent Translation Choices. In Proc. of NAACL-HLT 2012, pages 417-426.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Stochastic inversion transduction grammars and bilingual parsing of parallel corpora", |
| "authors": [ |
| { |
| "first": "Dekai", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Computational Linguistics", |
| "volume": "23", |
| "issue": "3", |
| "pages": "377--403", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dekai Wu. 1997. Stochastic inversion transduction grammars and bilingual parsing of parallel corpora. Computational Linguistics, 23(3):377-403.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Document-level Consistency Verification in Machine Translation", |
| "authors": [ |
| { |
| "first": "Tong", |
| "middle": [], |
| "last": "Xiao", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingbo", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Shujie", |
| "middle": [], |
| "last": "Yao", |
| "suffix": "" |
| }, |
| { |
| "first": "Hao", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proc. of MT SUMMIT 2011", |
| "volume": "", |
| "issue": "", |
| "pages": "131--138", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tong Xiao, Jingbo Zhu, Shujie Yao, and Hao Zhang. 2011. Document-level Consistency Verification in Machine Translation. In Proc. of MT SUMMIT 2011, pages 131-138.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "A Topic Similarity Model for Hierarchical Phrase-based Translation", |
| "authors": [ |
| { |
| "first": "Xinyan", |
| "middle": [], |
| "last": "Xiao", |
| "suffix": "" |
| }, |
| { |
| "first": "Deyi", |
| "middle": [], |
| "last": "Xiong", |
| "suffix": "" |
| }, |
| { |
| "first": "Min", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Qun", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Shouxun", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proc. of ACL 2012", |
| "volume": "", |
| "issue": "", |
| "pages": "750--758", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xinyan Xiao, Deyi Xiong, Min Zhang, Qun Liu, and Shouxun Lin. 2012. A Topic Similarity Model for Hi- erarchical Phrase-based Translation. In Proc. of ACL 2012, pages 750-758.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Maximum Entropy Based Phrase Reordering Model for Statistical Machine Translation", |
| "authors": [ |
| { |
| "first": "Deyi", |
| "middle": [], |
| "last": "Xiong", |
| "suffix": "" |
| }, |
| { |
| "first": "Qun", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Shouxun", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proc. of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "521--528", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Deyi Xiong, Qun Liu, and Shouxun Lin. 2006. Maxi- mum Entropy Based Phrase Reordering Model for S- tatistical Machine Translation. In Proc. of ACL 2006, pages 521-528.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Modeling Lexical Cohesion for Document-Level Machine Translation", |
| "authors": [ |
| { |
| "first": "Deyi", |
| "middle": [], |
| "last": "Xiong", |
| "suffix": "" |
| }, |
| { |
| "first": "Guosheng", |
| "middle": [], |
| "last": "Ben", |
| "suffix": "" |
| }, |
| { |
| "first": "Min", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yajuan", |
| "middle": [], |
| "last": "L\u00fc", |
| "suffix": "" |
| }, |
| { |
| "first": "Qun", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proc. of IJ-CAI 2013", |
| "volume": "", |
| "issue": "", |
| "pages": "2183--2189", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Deyi Xiong, Guosheng Ben, Min Zhang, Yajuan L\u00fc, and Qun Liu. 2013. Modeling Lexical Cohesion for Document-Level Machine Translation. In Proc. of IJ- CAI 2013, pages 2183-2189.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "A Sense-Based Translation Model for Statistical Machine Translation", |
| "authors": [ |
| { |
| "first": "Deyi", |
| "middle": [], |
| "last": "Xiong", |
| "suffix": "" |
| }, |
| { |
| "first": "Min", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proc. of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "1459--1469", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Deyi Xiong and Min Zhang. 2014. A Sense-Based Translation Model for Statistical Machine Translation. In Proc. of ACL 2014, pages 1459-1469.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "BiTAM: Bilingual Topic AdMixture Models for Word Alignment", |
| "authors": [ |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [ |
| "P" |
| ], |
| "last": "Xing", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proc. of ACL/COLING 2006", |
| "volume": "", |
| "issue": "", |
| "pages": "969--976", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bing Zhao and Eric P.Xing. 2006. BiTAM: Bilingual Topic AdMixture Models for Word Alignment. In Proc. of ACL/COLING 2006, pages 969-976.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "HM-BiTAM: Bilingual Topic Exploration, Word Alignment, and Translation", |
| "authors": [ |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [ |
| "P" |
| ], |
| "last": "Xing", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proc. of NIPS 2007", |
| "volume": "", |
| "issue": "", |
| "pages": "1--8", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bing Zhao and Eric P.Xing. 2007. HM-BiTAM: Bilin- gual Topic Exploration, Word Alignment, and Trans- lation. In Proc. of NIPS 2007, pages 1-8.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "num": null, |
| "type_str": "figure", |
| "text": "Sample a topic distribution \u03b8 d \u223cDir(\u03b1). 2. For each position i that corresponds to a topical word f i in the document: (a) Sample a topic z i \u223cM ult(\u03b8 d ). (b) Conditioned on the topic z i , sample a target-side topical item\u1ebd i \u223cM ult(\u03c6 z i ). (c) Conditioned on the target-side topical item\u1ebd i , sample the topical word f i \u223cM ult(\u03c8\u1ebd i ). 3. For each position j that corresponds to a contextual word c j in the document: (a) Collect all target-side topical items\u1ebd s that are translations of neighboring topical words within a window centered at c j (window size w s ). (b) Randomly sample an item from\u1ebd s , e j \u223cU nif (\u1ebd s ). (c) Conditioned on the sampled target-side topical item\u1ebd j , sample the contextual word c j \u223cM ult(\u03be\u1ebd j ). To better illustrate CATM, let us revisit the example inFigure 1. We describe how CATM generates top-Graphical representation of our model." |
| }, |
| "FIGREF1": { |
| "uris": null, |
| "num": null, |
| "type_str": "figure", |
| "text": "d and\u1ebd d denote the sets of topical words and their target-side topical item assignments in document d, c d and\u1ebd d are the sets of contextual words and their target-side topical item assignments in document d." |
| }, |
| "TABREF0": { |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "text": "exploit very limited context information contained in bilingual rules for lexical selection. * Corresponding author. {stance, attitude ...} topic, Politics topic ...] {problem, issue ...} w\u00e8nt\u00ed", |
| "content": "<table/>" |
| }, |
| "TABREF2": { |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "text": "Notations in CATM.", |
| "content": "<table/>" |
| }, |
| "TABREF4": { |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "text": "Experiment results on the development set using different window sizes w s .", |
| "content": "<table/>" |
| }, |
| "TABREF7": { |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "text": "", |
| "content": "<table><tr><td>: Experiment results on the test sets. CATM (Log-</td></tr><tr><td>linear) is the combination of CATM (Context) and CATM</td></tr><tr><td>(Topic) in a log-linear manner.</td></tr></table>" |
| }, |
| "TABREF8": { |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "text": "impose a hard constraint to guarantee", |
"content": "<table><tr><td>Topic</td><td>Target-side Topical Items</td><td/><td colspan=\"3\">Source-side Contextual Words</td></tr><tr><td/><td>UNHCR</td><td colspan=\"4\">J\u00ac(refugee) \u2022\u00af?(office) ; (commissioner)\u00af\u00d6(affair) p?(high-level)</td></tr><tr><td/><td>republic</td><td colspan=\"2\">\u00e9 \u2020(union) \u00ac\u00cc(democracy)</td><td/><td>?(government) \u017ed=(Islam) \u00a5\u0161(Central Africa)</td></tr><tr><td>refugee</td><td>refugee</td><td colspan=\"4\">J\u00ac(refugee)\u02c6\u00a3(return) 6l\"\u00a4(displaced) e\u02c6(repatriate)</td><td>o(protect)</td></tr><tr><td/><td>Kosovo</td><td colspan=\"4\">r\u00f7Fae(Metohija)\u00b8S(territory)\u02c6\u00c5(crisis) \u00db\u00b3(situation) l 'ae(Serbia)</td></tr><tr><td/><td>federal</td><td colspan=\"4\">\u00daI(republic) Hd.\u00c5(Yugoslavia) \u2030\u00a2\u00bb(Kosovo)</td><td>?(government)</td><td>\u00db(authority)</td></tr><tr><td/><td>military</td><td>*</td><td colspan=\"3\">(observer) 1\u00c4(action) {I(USA) < (personnel) \u00dc\u00e8(army)</td></tr><tr><td/><td>missile</td><td/><td colspan=\"3\">\"\"(defense) X\u00da(system) {I(USA) u (launch) q(*)</td></tr><tr><td>military</td><td>United States</td><td colspan=\"2\">\u00a5I(China) F (Japan)</td><td colspan=\"2\">(Taiwan)</td><td>\u00af(military) NMD(National Missile Defense)</td></tr><tr><td/><td>system</td><td colspan=\"4\">\u00e9\u00dcI(United Nations) \u00ef\u00e1(build) I(country) I[(country) &E(information)</td></tr><tr><td/><td>war</td><td/><td colspan=\"3\">\u00d4 (war) |( * ) -.(world) u\u00c4(wage)\u00b0 (gulf)</td></tr><tr><td/><td>country</td><td colspan=\"4\">u\u00d0\u00a5(developing) u\u02c6(developed) \u0161\u00b3(Africa) u\u00d0(development) \u00a5(China)</td></tr><tr><td/><td>development</td><td colspan=\"4\">OE\u00b1Y(sustainable) \u00b2L(economy) r?(promote)</td><td>\u00ac(society)\u00af (situation)</td></tr><tr><td>economy</td><td>international</td><td colspan=\"4\">\u00ac(society) |\"(organization) \u00dc\u0160(coorporation) I[(country) \u00e9\u00dcI(United Nations)</td></tr><tr><td/><td>economic</td><td colspan=\"4\">\u00ac(society) u\u00d0(development) O\u2022(growth) I[(country)</td><td>\u00a5z(globalization)</td></tr><tr><td/><td>trade</td><td colspan=\"4\">u\u00d0(development) IS(international) -.(world) \u00dd](investment) :(point)</td></tr><tr><td/><td>Taiwan</td><td colspan=\"3\">\u00a5I(China) OE\u00ba(mainland)</td><td>\u00db(authority) {I(USA) \u00d3oe(compatriot)</td></tr><tr><td/><td colspan=\"4\">China`(say) {I(USA)</td><td>(Taiwan)</td><td>K(principle) \u00fc(*)</td></tr><tr><td>cross-strait</td><td>relation</td><td/><td colspan=\"3\">u\u00d0(development) W(*) \u00a5(China) \u00fc(*) I(country)</td></tr><tr><td>relation</td><td>cross-strait</td><td/><td colspan=\"2\">\u00fc(*) 'X(relation)</td><td>(Taiwan) W(*)</td><td>6(exchange)</td></tr><tr><td/><td>issue</td><td colspan=\"4\">)\u00fb(settlement) ?\u00d8(discuss)\u00afK(issue) - \u2021(important)</td><td>(Taiwan)</td></tr></table>"
| }, |
| "TABREF9": { |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "text": "", |
| "content": "<table/>" |
| } |
| } |
| } |
| } |