| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T07:33:47.022200Z" |
| }, |
| "title": "A Test Suite for Evaluating Discourse Phenomena in Document-level Neural Machine Translation", |
| "authors": [ |
| { |
| "first": "Xinyi", |
| "middle": [], |
| "last": "Cai", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Tianjin University", |
| "location": { |
| "country": "China" |
| } |
| }, |
| "email": "xinyicai@tju.edu.cn" |
| }, |
| { |
| "first": "Deyi", |
| "middle": [], |
| "last": "Xiong", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Tianjin University", |
| "location": { |
| "country": "China" |
| } |
| }, |
| "email": "dyxiong@tju.edu.cn" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "The need to evaluate the ability of context-aware neural machine translation (NMT) models in dealing with specific discourse phenomena arises in document-level NMT. However, test sets that satisfy this need are rare. In this paper, we propose a test suite to evaluate three common discourse phenomena in English-Chinese translation: pronoun, discourse connective and ellipsis where discourse divergences lie across the two languages. The test suite contains 1,200 instances, 400 for each type of discourse phenomena. We perform both automatic and human evaluation with three state-of-the-art context-aware NMT models on the proposed test suite. Results suggest that our test suite can be used as a challenging benchmark test bed for evaluating document-level NMT. The test suite will be publicly available soon.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "The need to evaluate the ability of context-aware neural machine translation (NMT) models in dealing with specific discourse phenomena arises in document-level NMT. However, test sets that satisfy this need are rare. In this paper, we propose a test suite to evaluate three common discourse phenomena in English-Chinese translation: pronoun, discourse connective and ellipsis where discourse divergences lie across the two languages. The test suite contains 1,200 instances, 400 for each type of discourse phenomena. We perform both automatic and human evaluation with three state-of-the-art context-aware NMT models on the proposed test suite. Results suggest that our test suite can be used as a challenging benchmark test bed for evaluating document-level NMT. The test suite will be publicly available soon.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Document-level NMT has attracted extensive interest in recent years. Different from sentence-level NMT models, discourse-level models need to not only cope with intra-sentence dependencies, but also incorporate context beyond current sentence into context-aware translation. Inter-sentence links usually exhibit a wide variety of discourse phenomena: coreference, lexical cohesion, coherence, discourse relations, etc. The quality of a document-level NMT model therefore can be evaluated based on its ability in dealing with these discourse phenomena.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Widely-used automatic evaluation metrics, e.g., BLEU (Papineni et al., 2002) , normally consider fragments in a local window for translation quality assessment, while cross-sentence discourse links are usually neglected. Hence, for document-level models, current automatic evaluation metrics may not be a reasonably good fit for evaluation. One possible alternative is using manually-created test suites which are composed of carefully selected examples with discourse phenomena (Hardmeier, 2015) .", |
| "cite_spans": [ |
| { |
| "start": 53, |
| "end": 76, |
| "text": "(Papineni et al., 2002)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 479, |
| "end": 496, |
| "text": "(Hardmeier, 2015)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Such test suites (Guillou et al., 2018; Rysov\u00e1 et al., 2019; Vojt\u011bchov\u00e1 et al., 2019; Voita et al., 2019; Popovi\u0107, 2019) have been constructed for several language pairs, such as English-Czech, English-German, English-Russian, French-German, but few in English-Chinese translation. In this paper, we propose a test suite aiming at English-Chinese discourse phenomena evaluation. Three frequent discourse phenomena in English-Chinese translation are selected in our test suite, namely pronoun, discourse connective and ellipsis, each of which forms an individual test set. We choose examples from the OpenSubtitles (Lison and Tiedemann, 2016) to construct the three test sets. Unlike corpora from news domain, this corpus is more conversational and colloquial. We use this test suite to evaluate several typical context-aware NMT models. The experiment results show that our test suite can evaluate the ability of NMT models in dealing with discourse phenomena and that it is still very challenging for current context-aware models to capture different discourse phenomena.", |
| "cite_spans": [ |
| { |
| "start": 17, |
| "end": 39, |
| "text": "(Guillou et al., 2018;", |
| "ref_id": null |
| }, |
| { |
| "start": 40, |
| "end": 60, |
| "text": "Rysov\u00e1 et al., 2019;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 61, |
| "end": 85, |
| "text": "Vojt\u011bchov\u00e1 et al., 2019;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 86, |
| "end": 105, |
| "text": "Voita et al., 2019;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 106, |
| "end": 120, |
| "text": "Popovi\u0107, 2019)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 614, |
| "end": 641, |
| "text": "(Lison and Tiedemann, 2016)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Research on the evaluation of document-level machine translation is usually on specific discourse phenomena. A few test suites and methods have been designed for evaluating NMT from the perspective of discourse phenomena.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "For pronoun translation evaluation, recent test sets on pronoun evaluation have consisted of contrastive pairs. Bawden et al. (2018) provide 50 example blocks of English-French contrastive pairs. M\u00fcller et al. (2018) have also created contrastive pairs of pronoun \"it\" in English-German translation.", |
| "cite_spans": [ |
| { |
| "start": 112, |
| "end": 132, |
| "text": "Bawden et al. (2018)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 196, |
| "end": 216, |
| "text": "M\u00fcller et al. (2018)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Contrastive test sets allow us to automatically evaluate document-level NMT by only judging whether the evaluated model can choose the correct translation against the wrong from each contrastive pair according to their model score. However, this is an indirect rather than a direct way to evaluate the ability of context-aware NMT in modeling discourse phenomena as we do not evaluate the actual translations generated by these NMT systems.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "To evaluate discourse connective translation, Meyer et al. (2012) propose ACT (accuracy of connective translation) to evaluate connective translation. For French-English discourse relation and discourse connective translation assessment, Smith and Specia (2018) use pretrained bilingual embeddings of discourse connectives. Popovi\u0107 (2019) investigates conjunction disambiguation in English-German and French-German translation.", |
| "cite_spans": [ |
| { |
| "start": 46, |
| "end": 65, |
| "text": "Meyer et al. (2012)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 324, |
| "end": 338, |
| "text": "Popovi\u0107 (2019)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "For the evaluation on ellipsis translation, Voita et al. (2019) explore contrastive examples to evaluate the verb phrase ellipsis and morphological inflection in English-Russian translation. In our work, we also investigate verb ellipsis in English-Chinese translation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We choose three types of discourse phenomena, i.e., pronoun, discourse connective and ellipsis, as they appear frequently in English-Chinese document-level NMT. In the following parts, we will introduce corpus construction and then the three test sets separately.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Test Sets", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Due to the lack of such a test set for English-Chinese translation, we manually construct our test sets. We select instances from the open-source corpus OpenSubtitles (Lison and Tiedemann, 2016) as our data sources. First, we filter out characters and tokens written in languages other than English and Chinese. We then extract snippets with two neighboring sentences. Finally, we select test cases from extracted snippets according to different language phenomena.", |
| "cite_spans": [ |
| { |
| "start": 167, |
| "end": 194, |
| "text": "(Lison and Tiedemann, 2016)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Test Sets Construction", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "For the construction of the pronoun test set, we discard snippets where the two adjacent sentences both include \"you\" or \"they\" in English. We then construct the test set from the remaining examples that contain \"\u4f60\", \"\u4f60\u4eec\", \"\u5979\u4eec\", \"\u5b83\u4eec\" and \"\u4ed6 \u4eec\" on the Chinese side.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Test Sets Construction", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "For the construction of the discourse connective test set, we automatically select examples where the second sentence contains specific discourse connectives. From these examples we manually select samples where English sentences contain ambiguous connectives with different senses, according to Webber et al. (2019) .", |
| "cite_spans": [ |
| { |
| "start": 296, |
| "end": 316, |
| "text": "Webber et al. (2019)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Test Sets Construction", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "As for the ellipsis test set, we first choose cases where the second sentence in English contains auxiliary verbs. If the Chinese translations of the chosen cases whether include ellipsis verbs, such cases are finally selected.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Test Sets Construction", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "As Chinese translations are provided by nonprofessional translators, they are sometimes noisy with errors. We hire professional translators to review the selected instances and correct translation errors. Each test set contains 400 examples. Data statistics are displayed in Table 1 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 275, |
| "end": 282, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Test Sets Construction", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "In the pronoun test set, we focus on the second person pronoun \"you\" and the third person pronoun \"they\" as well as their accusative and possessive forms. In Chinese, \"you\" can be translated as \"\u4f60\" (single form) or \"\u4f60\u4eec\" (plural form). And \"they\" is translated into words of different genders: \"\u4ed6 \u4eec\" (plural form of \"he\"), \"\u5979\u4eec\" (plural form of \"she\") and \"\u5b83\u4eec\" (plural form of \"it\"). Each type of pronouns has 80 examples in this test set. Figure 1 displays an example from this test set.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 438, |
| "end": 446, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Pronoun Test Set", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "In order to help document-level NMT models choose a correct translation for \"you\" and \"they\", we provide the previous sentence as context, which is guaranteed to eliminate such translation ambiguity. For \"you\", the preceding sentence usually contains nouns or names which indicate the plural or single information of the pronoun. As for \"they\", nouns with gender information, common names of men and women or non-human nouns in the source side context can be explored for translation disambiguation. Figure 2 . Discourse connectives are important to express the discourse relation between sentences. The same connective in different context, may convey different discourse relations in the sense hierarchy (Webber et al., 2019) . In order to correctly translate these ambiguous connectives, context-aware NMT models have to recognize discourse relations between clauses or sentences by taking sufficient context into account.", |
| "cite_spans": [ |
| { |
| "start": 706, |
| "end": 727, |
| "text": "(Webber et al., 2019)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 500, |
| "end": 508, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Pronoun Test Set", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We cover verb ellipsis in English in this test set. As illustrated in Figure 3 , Chinese and English exhibit different ellipsis patterns, which pose challenges for machine translation.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 70, |
| "end": 78, |
| "text": "Figure 3", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Ellipsis Test Set", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "If we are only given a sentence with ellipsis, we cannot fully understand this sentence as crucial information may be missing, which can only be recovered by resorting to previous context. For context-aware NMT models, this means that they have to find the elided information if this information should be present in the target language.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ellipsis Test Set", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "We used the proposed test suite as a benchmark test bed to evaluate state-of-the-art context-aware NMT models against the three types of discourse phenomena.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We used the following three document-level NMT models:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Models", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u2022 thumt: Zhang et al. (2018) extend the Transformer model with a new context encoder to model document-level context, which is then incorporated into the original encoder and decoder. They introduce a two-step training method to explore abundant sentence-level parallel corpora and limited document-level parallel corpora.", |
| "cite_spans": [ |
| { |
| "start": 9, |
| "end": 28, |
| "text": "Zhang et al. (2018)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Models", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u2022 CADec: Voita et al. (2019) introduce a two-pass framework, which first translates a sentence with a context-agnostic model and refines the target translation with both source and target context.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Models", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u2022 bert-nmt: Zhu et al. (2020) propose a BERT-fused model. They first use BERT to extract representations for an input sequence, and then fuse the representations into each layer of the encoder and decoder of the NMT model through attention mechanisms.", |
| "cite_spans": [ |
| { |
| "start": 12, |
| "end": 29, |
| "text": "Zhu et al. (2020)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Models", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We used the following corpora to train the three NMT models: 6M sentence pairs randomly selected from AI Challenger 1 2017 English-Chinese machine translation corpus, IWSLT'17 training data and a subset of OpenSubtitles (Lison and Tiedemann, 2016) corpus in spoken language. IWSLT'17 English-Chinese MT corpus comprises TED talks.", |
| "cite_spans": [ |
| { |
| "start": 220, |
| "end": 247, |
| "text": "(Lison and Tiedemann, 2016)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Thumt and CADec were trained on the sentence-level data, i.e., the 6M-sentence subset of the AI Challenger 2017 corpus, in the first stage. In the second phase of context-aware training, the combination of the IWSLT'17 training data and the subset of the OpenSubtitles corpus was used. Bert-nmt was trained on only IWSLT'17 data following Zhu et al. (2020) .", |
| "cite_spans": [ |
| { |
| "start": 339, |
| "end": 356, |
| "text": "Zhu et al. (2020)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The BLEU scores of the three models on our test suite are shown in Table 2 . In addition to the automatic evaluation, we further performed human evaluation to investigate the translation accuracy on the three types of discourse phenomena. In human evaluation, we focus on whether the relevant phenomena are correctly translated and ignore other errors. Human evaluation is better at evaluating discourse phenomena translation. Human evaluation results are shown in Table 3 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 67, |
| "end": 74, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| }, |
| { |
| "start": 465, |
| "end": 472, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Overall, CADec achieves the best results in most cases but not in all cases. In translating you (sing.), while and ellipsis, thumt achieves the highest accuracy, while bert-nmt is better than the others in translating they (it (pl.)).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "For pronoun translation, \"you\" is usually translated into \"\u4f60\" (you (sing.)) while \"they\" into \"\u4ed6 \u4eec\" (he (pl.)). This is because these two cases are more frequent than other cases (e.g., \"\u4f60\u4eec\", \"\u5b83 \u4eec\"). This also happens for discourse connective translation. For example, \"while\" is often translated into \"\u5f53\u2026\u2026\u65f6\u5019\" rather than \"\u800c\" (but) as the former is more common than the latter.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Compared with pronouns and discourse connectives, ellipsis is more challenging for the three context-aware models, which achieve a translation accuracy of <11%. Verb ellipsis usually occurs in questions or replies in spoken dialogues. We observe that auxiliary verb \"do\" is often wrongly translated into \"do\" (notional verb) or \"know\". This suggests that these context-aware models cannot correctly recognize ellipsis and detect omitted fragments from context.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "We have presented a discourse-level test suite for the evaluation of context-aware neural machine translation. We constructed 1,200 instances for three types of discourse phenomena in English-Chinese translation, 400 instances per discourse phenomenon. Our experiments with three state-of-the-art document-level NMT models suggest that ellipsis is the most challenging discourse issue among the three test sets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "https://challenger.ai/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "The present research was supported by the National Natural Science Foundation of China (Grant No. 61861130364), Natural Science Foundation of Tianjin (Grant No.19JCZDJC31400) and a Newton International Fellowship from the Royal Society (London)(NAF\\R1\\180122). We would like to thank the anonymous reviewers for their insightful comments. The corresponding author is Professor Deyi Xiong (dyxiong@tju.edu.cn).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Evaluating discourse phenomena in neural machine translation", |
| "authors": [ |
| { |
| "first": "Rachel", |
| "middle": [], |
| "last": "Bawden", |
| "suffix": "" |
| }, |
| { |
| "first": "Rico", |
| "middle": [], |
| "last": "Sennrich", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Birch", |
| "suffix": "" |
| }, |
| { |
| "first": "Barry", |
| "middle": [], |
| "last": "Haddow", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "1304--1313", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N18-1118" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rachel Bawden, Rico Sennrich, Alexandra Birch, and Barry Haddow. 2018. Evaluating discourse phenom- ena in neural machine translation. In Proceedings of the 2018 Conference of the North American Chap- ter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Pa- pers), pages 1304-1313, New Orleans, Louisiana. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "A pronoun test suite evaluation of the English-German MT systems at WMT 2018", |
| "authors": [], |
| "year": null, |
| "venue": "Proceedings of the Third Conference on Machine Translation: Shared Task Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "570--577", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W18-6435" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "A pronoun test suite evaluation of the English- German MT systems at WMT 2018. In Proceedings of the Third Conference on Machine Translation: Shared Task Papers, pages 570-577, Belgium, Brus- sels. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "On statistical machine translation and translation theory", |
| "authors": [ |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Hardmeier", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the Second Workshop on Discourse in Machine Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "168--172", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W15-2522" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christian Hardmeier. 2015. On statistical machine translation and translation theory. In Proceedings of the Second Workshop on Discourse in Machine Translation, pages 168-172, Lisbon, Portugal. Asso- ciation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Opensub-titles2016: Extracting large parallel corpora from movie and TV subtitles", |
| "authors": [ |
| { |
| "first": "Pierre", |
| "middle": [], |
| "last": "Lison", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00f6rg", |
| "middle": [], |
| "last": "Tiedemann", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "LREC", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pierre Lison and J\u00f6rg Tiedemann. 2016. Opensub- titles2016: Extracting large parallel corpora from movie and TV subtitles. In LREC.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Machine translation of labeled discourse connectives", |
| "authors": [ |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Meyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrei", |
| "middle": [], |
| "last": "Popescu-Belis", |
| "suffix": "" |
| }, |
| { |
| "first": "Najeh", |
| "middle": [], |
| "last": "Hajlaoui", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Gesmundo", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the Tenth Biennial Conference of the Association for Machine Translation in the Americas (AMTA)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas Meyer, Andrei Popescu-Belis, Najeh Hajlaoui, and Andrea Gesmundo. 2012. Machine translation of labeled discourse connectives. In Proceedings of the Tenth Biennial Conference of the Association for Machine Translation in the Americas (AMTA), page 10.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "A large-scale test set for the evaluation of context-aware pronoun translation in neural machine translation", |
| "authors": [ |
| { |
| "first": "Mathias", |
| "middle": [], |
| "last": "M\u00fcller", |
| "suffix": "" |
| }, |
| { |
| "first": "Annette", |
| "middle": [], |
| "last": "Rios", |
| "suffix": "" |
| }, |
| { |
| "first": "Elena", |
| "middle": [], |
| "last": "Voita", |
| "suffix": "" |
| }, |
| { |
| "first": "Rico", |
| "middle": [], |
| "last": "Sennrich", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Third Conference on Machine Translation: Research Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "61--72", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W18-6307" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mathias M\u00fcller, Annette Rios, Elena Voita, and Rico Sennrich. 2018. A large-scale test set for the eval- uation of context-aware pronoun translation in neu- ral machine translation. In Proceedings of the Third Conference on Machine Translation: Research Pa- pers, pages 61-72, Brussels, Belgium. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Bleu: A method for automatic evaluation of machine translation", |
| "authors": [ |
| { |
| "first": "Kishore", |
| "middle": [], |
| "last": "Papineni", |
| "suffix": "" |
| }, |
| { |
| "first": "Salim", |
| "middle": [], |
| "last": "Roukos", |
| "suffix": "" |
| }, |
| { |
| "first": "Todd", |
| "middle": [], |
| "last": "Ward", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei-Jing", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the 40th Annual Meeting on Association for Computational Linguistics, ACL '02", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/1073083.1073135" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: A method for automatic eval- uation of machine translation. In Proceedings of the 40th Annual Meeting on Association for Computa- tional Linguistics, ACL '02, page 311318, USA. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Evaluating conjunction disambiguation on English-to-German and French-to-German WMT 2019 translation hypotheses", |
| "authors": [ |
| { |
| "first": "Maja", |
| "middle": [], |
| "last": "Popovi\u0107", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Fourth Conference on Machine Translation", |
| "volume": "2", |
| "issue": "", |
| "pages": "464--469", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W19-5353" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Maja Popovi\u0107. 2019. Evaluating conjunction dis- ambiguation on English-to-German and French-to- German WMT 2019 translation hypotheses. In Proceedings of the Fourth Conference on Machine Translation (Volume 2: Shared Task Papers, Day 1), pages 464-469, Florence, Italy. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "A test suite and manual evaluation of document-level NMT at WMT19", |
| "authors": [ |
| { |
| "first": "Kate\u0159ina", |
| "middle": [], |
| "last": "Rysov\u00e1", |
| "suffix": "" |
| }, |
| { |
| "first": "Magdal\u00e9na", |
| "middle": [], |
| "last": "Rysov\u00e1", |
| "suffix": "" |
| }, |
| { |
| "first": "Tom\u00e1\u0161", |
| "middle": [], |
| "last": "Musil", |
| "suffix": "" |
| }, |
| { |
| "first": "Lucie", |
| "middle": [], |
| "last": "Pol\u00e1kov\u00e1", |
| "suffix": "" |
| }, |
| { |
| "first": "Ond\u0159ej", |
| "middle": [], |
| "last": "Bojar", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Fourth Conference on Machine Translation", |
| "volume": "2", |
| "issue": "", |
| "pages": "455--463", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W19-5352" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kate\u0159ina Rysov\u00e1, Magdal\u00e9na Rysov\u00e1, Tom\u00e1\u0161 Musil, Lucie Pol\u00e1kov\u00e1, and Ond\u0159ej Bojar. 2019. A test suite and manual evaluation of document-level NMT at WMT19. In Proceedings of the Fourth Confer- ence on Machine Translation (Volume 2: Shared Task Papers, Day 1), pages 455-463, Florence, Italy. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Assessing crosslingual discourse relations in machine translation", |
| "authors": [ |
| { |
| "first": "Karin", |
| "middle": [ |
| "Sim" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| }, |
| { |
| "first": "Lucia", |
| "middle": [], |
| "last": "Specia", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "ArXiv", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Karin Sim Smith and Lucia Specia. 2018. Assessing crosslingual discourse relations in machine transla- tion. ArXiv, abs/1810.03148.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "When a good translation is wrong in context: Context-aware machine translation improves on deixis, ellipsis, and lexical cohesion", |
| "authors": [ |
| { |
| "first": "Elena", |
| "middle": [], |
| "last": "Voita", |
| "suffix": "" |
| }, |
| { |
| "first": "Rico", |
| "middle": [], |
| "last": "Sennrich", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Titov", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1198--1212", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P19-1116" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Elena Voita, Rico Sennrich, and Ivan Titov. 2019. When a good translation is wrong in context: Context-aware machine translation improves on deixis, ellipsis, and lexical cohesion. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 1198-1212, Florence, Italy. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "SAO WMT19 test suite: Machine translation of audit reports", |
| "authors": [ |
| { |
| "first": "Tereza", |
| "middle": [], |
| "last": "Vojt\u011bchov\u00e1", |
| "suffix": "" |
| }, |
| { |
| "first": "Michal", |
| "middle": [], |
| "last": "Nov\u00e1k", |
| "suffix": "" |
| }, |
| { |
| "first": "Milo\u0161", |
| "middle": [], |
| "last": "Klou\u010dek", |
| "suffix": "" |
| }, |
| { |
| "first": "Ond\u0159ej", |
| "middle": [], |
| "last": "Bojar", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Fourth Conference on Machine Translation", |
| "volume": "2", |
| "issue": "", |
| "pages": "481--493", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W19-5355" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tereza Vojt\u011bchov\u00e1, Michal Nov\u00e1k, Milo\u0161 Klou\u010dek, and Ond\u0159ej Bojar. 2019. SAO WMT19 test suite: Machine translation of audit reports. In Proceedings of the Fourth Conference on Machine Translation (Volume 2: Shared Task Papers, Day 1), pages 481-493, Florence, Italy. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "The Penn Discourse Treebank 3.0 annotation manual", |
| "authors": [ |
| { |
| "first": "Bonnie", |
| "middle": [], |
| "last": "Webber", |
| "suffix": "" |
| }, |
| { |
| "first": "Rashmi", |
| "middle": [], |
| "last": "Prasad", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Aravind", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bonnie Webber, Rashmi Prasad, Alan Lee, and Aravind Joshi. 2019. The penn discourse treebank 3.0 annotation manual.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Improving the transformer translation model with document-level context", |
| "authors": [ |
| { |
| "first": "Jiacheng", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Huanbo", |
| "middle": [], |
| "last": "Luan", |
| "suffix": "" |
| }, |
| { |
| "first": "Maosong", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Feifei", |
| "middle": [], |
| "last": "Zhai", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingfang", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Min", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiacheng Zhang, Huanbo Luan, Maosong Sun, Feifei Zhai, Jingfang Xu, Min Zhang, and Yang Liu. 2018. Improving the transformer translation model with document-level context. In Proceedings of EMNLP.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Incorporating BERT into neural machine translation", |
| "authors": [ |
| { |
| "first": "Jinhua", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yingce", |
| "middle": [], |
| "last": "Xia", |
| "suffix": "" |
| }, |
| { |
| "first": "Lijun", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Di", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Tao", |
| "middle": [], |
| "last": "Qin", |
| "suffix": "" |
| }, |
| { |
| "first": "Wengang", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Houqiang", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Tie-Yan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "ArXiv", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jinhua Zhu, Yingce Xia, Lijun Wu, Di He, Tao Qin, Wengang Zhou, Houqiang Li, and Tie-Yan Liu. 2020. Incorporating BERT into neural machine translation. ArXiv, abs/2002.06823.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "type_str": "figure", |
| "num": null, |
| "text": "source: context: You rich guys think that money can buy anything. current: How right you are. target: context: \u4f60\u4eec\u5bcc\u4eba\u603b\u4ee5\u4e3a\u94b1\u80fd\u4e70\u5230\u4e00\u5207\u3002 current: \u4f60\u4eec\u60f3\u7684\u592a\u5bf9\u4e86\u3002 Figure 1: An example from the pronoun test set." |
| }, |
| "FIGREF1": { |
| "uris": null, |
| "type_str": "figure", |
| "num": null, |
| "text": "An example from the ellipsis test set. Similarly, we select five ambiguous discourse connectives according to Webber et al. (2019), namely while, as, since, though and or. Different senses of these ambiguous connectives frequently occur in English texts. The number of cases for each connective is 80. An example of discourse connective in this test set is demonstrated in" |
| }, |
| "TABREF1": { |
| "text": "Data statistics.", |
| "type_str": "table", |
| "num": null, |
| "content": "<table><tr><td>source:</td></tr><tr><td>context: Everything is so difficult in life, for me.</td></tr><tr><td>current: While for others it's all child's play.</td></tr><tr><td>target:</td></tr><tr><td>context: \u5bf9\u4e8e\u6211\uff0c\u751f\u6d3b\u4e00\u5207\u90fd\u5f88\u8270\u96be\u3002</td></tr><tr><td>current: \u5bf9\u4e8e\u522b\u4eba\u5374\u90fd\u50cf\u513f\u620f\u4e00\u6837\u3002</td></tr><tr><td>Figure 2: An example from the discourse connective</td></tr><tr><td>test set.</td></tr><tr><td>source:</td></tr><tr><td>context: You see, she doesn't know.</td></tr><tr><td>current: Neither do I.</td></tr><tr><td>target:</td></tr><tr><td>context: \u770b\uff0c\u5979\u4e0d\u77e5\u9053\u3002</td></tr><tr><td>current: \u6211\u4e5f\u4e0d\u77e5\u9053\u3002</td></tr></table>", |
| "html": null |
| }, |
| "TABREF3": { |
| "text": "BLEU scores on the three test sets.", |
| "type_str": "table", |
| "num": null, |
| "content": "<table><tr><td/><td colspan=\"6\">you (pl.) you (sing.) it (pl.) she (pl.) he (pl.) while</td><td>as</td><td colspan=\"2\">since though</td><td>or</td><td colspan=\"3\">pronoun connective ellipsis</td></tr><tr><td>thumt</td><td>8.75</td><td>97.5</td><td>11.25</td><td>7.5</td><td>91.25</td><td>57.5</td><td>30</td><td colspan=\"2\">41.25 48.75</td><td>67.5</td><td>49</td><td>43.25</td><td>10.75</td></tr><tr><td>CADec</td><td>22.5</td><td>96.25</td><td>33.75</td><td>20</td><td>92.5</td><td colspan=\"3\">48.75 53.75 56.25</td><td>57.5</td><td>83.75</td><td>53</td><td>60</td><td>2.25</td></tr><tr><td>bert-nmt</td><td>18.75</td><td>93.75</td><td>38.75</td><td>0</td><td>90</td><td colspan=\"4\">53.75 31.25 41.25 46.25</td><td>75</td><td>48.25</td><td>49.5</td><td>5.75</td></tr></table>", |
| "html": null |
| }, |
| "TABREF4": { |
| "text": "Human evaluation results (accuracy %) on the three test sets.", |
| "type_str": "table", |
| "num": null, |
| "content": "<table/>", |
| "html": null |
| } |
| } |
| } |
| } |