| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T14:06:53.029735Z" |
| }, |
| "title": "Findings of the Fourth Workshop on Neural Generation and Translation", |
| "authors": [ |
| { |
| "first": "Kenneth", |
| "middle": [], |
| "last": "Heafield", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Edinburgh Heriot-Watt University", |
| "location": { |
| "addrLine": "\u2665 Apple", |
| "settlement": "Facebook" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Hiroaki", |
| "middle": [], |
| "last": "Hayashi", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Edinburgh Heriot-Watt University", |
| "location": { |
| "addrLine": "\u2665 Apple", |
| "settlement": "Facebook" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "\u2666", |
| "middle": [], |
| "last": "Yusuke", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Edinburgh Heriot-Watt University", |
| "location": { |
| "addrLine": "\u2665 Apple", |
| "settlement": "Facebook" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Ioannis", |
| "middle": [], |
| "last": "Konstas", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Edinburgh Heriot-Watt University", |
| "location": { |
| "addrLine": "\u2665 Apple", |
| "settlement": "Facebook" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Finch", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Edinburgh Heriot-Watt University", |
| "location": { |
| "addrLine": "\u2665 Apple", |
| "settlement": "Facebook" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Graham", |
| "middle": [], |
| "last": "Neubig", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Edinburgh Heriot-Watt University", |
| "location": { |
| "addrLine": "\u2665 Apple", |
| "settlement": "Facebook" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Xian", |
| "middle": [], |
| "last": "Li", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Edinburgh Heriot-Watt University", |
| "location": { |
| "addrLine": "\u2665 Apple", |
| "settlement": "Facebook" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Birch", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Edinburgh Heriot-Watt University", |
| "location": { |
| "addrLine": "\u2665 Apple", |
| "settlement": "Facebook" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "\u2660", |
| "middle": [ |
| "\u2666" |
| ], |
| "last": "Carnegie", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Edinburgh Heriot-Watt University", |
| "location": { |
| "addrLine": "\u2665 Apple", |
| "settlement": "Facebook" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Mellon", |
| "middle": [], |
| "last": "University", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Edinburgh Heriot-Watt University", |
| "location": { |
| "addrLine": "\u2665 Apple", |
| "settlement": "Facebook" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Google", |
| "middle": [], |
| "last": "Research", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Edinburgh Heriot-Watt University", |
| "location": { |
| "addrLine": "\u2665 Apple", |
| "settlement": "Facebook" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "We describe the findings of the Fourth Workshop on Neural Generation and Translation, held in concert with the annual conference of the Association for Computational Linguistics (ACL 2020). First, we summarize the research trends of papers presented in the proceedings. Second, we describe the results of the three shared tasks 1) efficient neural machine translation (NMT) where participants were tasked with creating NMT systems that are both accurate and efficient, and 2) document-level generation and translation (DGT) where participants were tasked with developing systems that generate summaries from structured data, potentially with assistance from text in another language and 3) STAPLE task: creation of as many possible translations of a given input text. This last shared task was organised by Duolingo.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "We describe the findings of the Fourth Workshop on Neural Generation and Translation, held in concert with the annual conference of the Association for Computational Linguistics (ACL 2020). First, we summarize the research trends of papers presented in the proceedings. Second, we describe the results of the three shared tasks 1) efficient neural machine translation (NMT) where participants were tasked with creating NMT systems that are both accurate and efficient, and 2) document-level generation and translation (DGT) where participants were tasked with developing systems that generate summaries from structured data, potentially with assistance from text in another language and 3) STAPLE task: creation of as many possible translations of a given input text. This last shared task was organised by Duolingo.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Neural sequence to sequence models (Kalchbrenner and Blunsom, 2013; Sutskever et al., 2014; Bahdanau et al., 2015) are the workhorse behind a wide variety of different natural language processing tasks such as machine translation, generation, summarization and simplification. The 4th Workshop on Neural Machine Translation and Generation (WNGT 2020) provided a forum for research in applications of neural models to machine translation and other language generation tasks (including summarization, NLG from structured data, dialog response generation, among others). Overall, the workshop was held with two goals. First, it aimed to synthesize the current state of knowledge in neural machine translation and generation: this year we continued to encourage submissions that not only advance the state of the art through algorithmic advances, but also analyze and understand the current state of the art, pointing to future research directions. Towards this goal, we received a number of high-quality research contributions on the workshop topics, as summarized in Section 2. Second, the workshop aimed to expand the research horizons in NMT: we continued to organize the Efficient NMT task which encouraged participants to develop not only accurate but computationally efficient systems. This task had three participants each with a number of individual systems. We organized the second shared task on \"Document-level Generation and Translation\", which aims to push forward document-level generation technology and contrast the methods for different types of inputs. Unfortunately this task only had one participant. Finally, we introduced a new shared task, organised by Duolingo, which encouraged models to produce as many correct translations as possible for a given input. This task generated a lot of interest and there were 11 participants. The results of the shared task are summarized in Sections 3, 4 and 5.", |
| "cite_spans": [ |
| { |
| "start": 35, |
| "end": 67, |
| "text": "(Kalchbrenner and Blunsom, 2013;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 68, |
| "end": 91, |
| "text": "Sutskever et al., 2014;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 92, |
| "end": 114, |
| "text": "Bahdanau et al., 2015)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Similar to last year we invited the MT and NLG community to contribute to the workshop with long papers, extended abstracts for preliminary work, and cross-submissions of papers that have appeared in other venues. Keeping up with the main vision of the workshop, we were aiming for a variety of works at the intersection of Machine Translation and Language Generation tasks.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Summary of Research Contributions", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We received a total of 28 submissions, from which we accepted 16. There were 2 cross-submissions, 3 extended abstracts and 11 full papers. There were also 15 system submission papers. We elicited two double-blind reviews for each submission, avoiding conflicts of interest.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Summary of Research Contributions", |
| "sec_num": "2" |
| }, |
| { |
| "text": "With regards to thematology there were 8 papers with a focus on Natural Language Generation and 8 with the application of Machine Translation in mind. The underlying emphasis across submissions was placed this year on capitalizing on the use of pre-training models (e.g., BERT; (Devlin et al., 2019) especially for low-resource datasets. The quality of the accepted publications was very high; there was a significant drop in numbers though in comparison to last year (36 accepted papers from 68 submissions) which is most likely due to the extra overhead on conducting research under lockdown policies sanctioned globally due to COVID-19 pandemic.", |
| "cite_spans": [ |
| { |
| "start": 278, |
| "end": 299, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Summary of Research Contributions", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The efficiency task complements machine translation quality evaluation campaigns by also measuring and optimizing the computational cost of inference. This is the third edition of the task, updating and building upon the second edition of the task (Hayashi et al., 2019) .", |
| "cite_spans": [ |
| { |
| "start": 248, |
| "end": 270, |
| "text": "(Hayashi et al., 2019)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Efficiency Task", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We asked participants to build English\u2192German machine translation systems following the data condition of the 2019 Workshop on Machine Translation (Barrault et al., 2019) and submit them as Docker containers. Docker containers enabled consistent measurement of computational cost on several dimensions: time, memory, and disk space. These are measured under three hardware conditions: a GPU, a single CPU core, and multi-core CPU on all cores. Participants were free to choose what metrics and hardware platforms to optimize for.", |
| "cite_spans": [ |
| { |
| "start": 147, |
| "end": 170, |
| "text": "(Barrault et al., 2019)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Efficiency Task", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Three teams submitted to the shared task: NiuTrans, OpenNMT, and UEdin. All teams submitted to the GPU and multi-core CPU tracks; OpenNMT and UEdin submitted to the single-CPU track. Some CPU submissions from UEdin had a memory leak; their post-deadline fix is shown as \"UEdin Fix.\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Efficiency Task", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Common techniques across teams were variations on the transformer architecture, model distillation, 16-bit floating point inference on GPUs (except OpenNMT), and 8-bit integer inference on CPUs (except NiuTrans). Curiously, all submissions used autoregressive models despite the existence of non-autoregressive models motivated by speed.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Efficiency Task", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The GPU track used a g4dn.xlarge instance with one NVIDIA T4 GPU, 16 GB GPU RAM, 16 GB host RAM, and 2 physical cores of an Intel Xeon Platinum 8259CL CPU. The NVIDIA T4 GPU is relatively small compared to the NVIDIA V100 GPU, but the newer Turing architecture introduces support for 4-bit and 8-bit integer operations in Tensor Cores. In practice, however, participants used floating-point operations on the GPU even though both OpenNMT and UEdin used 8-bit integers in their CPU submissions. This was primarily due to code readiness. Timing was run on a nonexclusive virtual machine because the instance is not yet available without virtualization.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hardware", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The CPU tracks used a c5.metal instance which has two sockets of the Intel Xeon Platinum 8275CL CPU, 48 physical cores, hyperthreading enabled, and 192 GB RAM. As a Cascade Lake processor, it supports the Vector Neural Network Instructions (VNNI) that OpenNMT and UEdin used for 8-bit integer matrix multiplication. For the single core track, we reserved the entire machine then ran Docker with --cpuset-cpus=0. For the multi-core track, participants were free to configure their own CPU sets and affinities. The c5.metal instance runs directly on the full hardware; it is not a virtual machine.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hardware", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Teams were offered AWS time to tune their submissions on the test hardware. All participants experimented on the test hardware using provided time or their own funds.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hardware", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Previous editions of the task specified the test set, but last year's organizers removed a team for generating the test outputs even with empty input. Moreover, translation time for some submissions was approaching one second and often lower than loading time. Hence we updated the task to make it more robust to adversarial participants while also increasing reliability of speed measurements. We told participants the test set would have one million lines, lines would have at most 100 space-separated words, source sentences from an unspecified quality evaluation corpus would be hidden in their input, and quality would be evaluated with BLEU.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Measurement", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "After the submission deadline, we announced the main quality score is the unweighted average SacreBLEU 1 (Post, 2018) was excluded because it has lines longer than 100 words. We refer to this score as WMT1* while also reporting the usual WMT19 scores for the translation task. Shown in Table 1 , the test set consisted of the aforementioned WMT input sentences and filler. For filler, we used parallel corpora outside the WMT data condition to verify that the system was still translating reasonably. Specifically, we used a recent crawl of the European Medicines Agency (EMEA), 3 the Tatoeba project, 4 and a crawl of the German Federal Foreign Office Berlin 5 all gathered by the European Language Resource Consortium. We do not consider the filler corpora clean or in-domain enough to be official evaluations of quality; results appear in supplementary material. To meet our promise to participants that lines would not be longer than 100 words (space-separated tokens), we excluded WMT12 and removed any English sentences longer than 100 words from the filler. We then truncated the German Federal Foreign Office Berlin corpus to obtain a total of 1 million lines. The input sentences were randomly shuffled and mixed across corpora, retaining a separate file to enable reconstruction. The final corpus and evaluation tools are available at http://data.statmt.org/heafield/wngt20/test/.", |
| "cite_spans": [ |
| { |
| "start": 105, |
| "end": 117, |
| "text": "(Post, 2018)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 286, |
| "end": 293, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Measurement", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Time was measured with wall (real) time reported by time and CPU time reported by the kernel for the process group. We no longer measure loading time because it is small compared to the cost of translating 1 million sentences, is easy to game with busywork, and some toolkits do lazy initialization which makes loading time difficult to measure.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Measurement", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Peak RAM consumption was measured using memory.max usage in bytes from the kernel for the CPU and by polling nvidia-smi for the GPU. Swap was disabled.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Measurement", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Participants were told to separate their Docker images into model and code files so that models could be measured separately from the relatively noisy size of code and libraries. A model was defined as \"everything derived from data: all model parameters, vocabulary files, BPE configuration if applicable, quantization parameters or lookup tables where applicable, and hyperparameters like embedding sizes.\" Code could include \"simple rule-based tokenizer scripts and hard-coded model structure that could plausibly be used for another language pair.\" They were also permitted to use standard compression tools such as xz to compress models; decompression time was included in results but small relative to the cost of translation. We report size of the model directory and Docker image size, both captured before the model ran.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Measurement", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Each evaluation started from a fresh boot of a constant Ubuntu 18.04 LTS disk image (one for CPU and one for GPU). Internet access was blocked at the cloud provider level except for the evaluation controller. This also prevented automatic upgrades.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Measurement", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Measurements are reported in Table 2 . The tradeoffs between quality, model size, speed, and RAM are shown in Figure 1 . We compare the costeffectiveness of GPU and multi-core CPU hardware at the prices charged by Amazon Web Services in Figure 2 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 29, |
| "end": 36, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| }, |
| { |
| "start": 110, |
| "end": 118, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| }, |
| { |
| "start": 237, |
| "end": 245, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Every team had a Pareto optimal submission for speed. This is largely due to teams focusing on different parts of the Pareto curve. OpenNMT focused on fast, small, and lower-quality systems plus one higher-quality submission. UEdin focused on higher-quality systems that were slower. Two of NiuTrans's four GPU submissions were Pareto optimal on speed, lying between OpenNMT and UEdin; their multi-core CPU submission performed poorly on all metrics.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Regarding model size, OpenNMT and UEdin made a range of Pareto-optimal submissions, mostly driven by the number of parameters and 8-bit quantization.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "OpenNMT's small lower-quality models have low CPU RAM and Docker image size; UEdin is Pareto-optimal for higher-quality models. OpenNMT was the only team to optimize for these metrics in their system description. In their multi-core CPU submission, OpenNMT shared memory amongst processes while other participants simply used multiple processes with copies of the model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Following the previous workshop, we continued with the shared task of document-level generation and translation. This task is motivated as the central evaluation testbed for document-level generation systems with different types of inputs by providing a parallel dataset consisting of structured tables and text in two languages. We host various tracks within the testbed based on input and output constraints and investigate and contrast the system differences.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Document Generation and Translation Task", |
| "sec_num": "4" |
| }, |
| { |
| "text": "In particular, we conducted the following six tracks:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Document Generation and Translation Task", |
| "sec_num": "4" |
| }, |
| { |
| "text": "\u2022 NLG (Data \u2192 En, Data \u2192 De): Generate a document summary in the target language given only structured tables (i.e., data-to-text).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Document Generation and Translation Task", |
| "sec_num": "4" |
| }, |
| { |
| "text": "\u2022 MT (De \u2194 En): Translate a document in the source language to the target language (i.e., document-level translation).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Document Generation and Translation Task", |
| "sec_num": "4" |
| }, |
| { |
| "text": "\u2022 MT+NLG (Data+En \u2192 De, Data+De \u2192 En): Generate a document summary given the structured tables and the summary in another language.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Document Generation and Translation Task", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We employ standard evaluation metrics for the tasks above along two axes following (Hayashi et al., 2019) :", |
| "cite_spans": [ |
| { |
| "start": 83, |
| "end": 105, |
| "text": "(Hayashi et al., 2019)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation Measures", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Textual Accuracy: BLEU (Papineni et al., 2002) and ROUGE (Lin, 2004) as measures for surface-level textual accuracy compared to reference summaries.", |
| "cite_spans": [ |
| { |
| "start": 23, |
| "end": 46, |
| "text": "(Papineni et al., 2002)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 57, |
| "end": 68, |
| "text": "(Lin, 2004)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation Measures", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Content Accuracy: Relation generation (RG), content selection (CS), and content ordering (CO) metrics (Wiseman et al., 2017) to assess the fidelity of the content to the input data.", |
| "cite_spans": [ |
| { |
| "start": 102, |
| "end": 124, |
| "text": "(Wiseman et al., 2017)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation Measures", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "An information extraction model is employed for content accuracy measures for each target language. We followed (Wiseman et al., 2017) and ensembled six information extraction models (three CNN-based, three LSTM-based) with different random seeds.", |
| "cite_spans": [ |
| { |
| "start": 112, |
| "end": 134, |
| "text": "(Wiseman et al., 2017)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation Measures", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We re-use Rotowire English-German dataset (Hayashi et al., 2019) , which consists of a subset of the Rotowire dataset (Wiseman et al., 2017) with professional German translations. Each instance corresponds to an NBA game and consists of a box-score table for the match, base information about the teams (e.g. team name, city), English game summary, and the same game summary translated to German. Final evaluation was performed on the test split of the Rotowire English-German dataset.", |
| "cite_spans": [ |
| { |
| "start": 42, |
| "end": 64, |
| "text": "(Hayashi et al., 2019)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 118, |
| "end": 140, |
| "text": "(Wiseman et al., 2017)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We followed the same setting in terms of additional resources participants could adopt.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Systems conforming to the data requirements are marked constrained, otherwise unconstrained. Results are indicated by the initials (C/U).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We prepared two baselines for different tracks:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "FairSeq-19", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "We use FairSeq (Ng et al., 2019) (WMT'19 single model 6 ) for MT and MT+NLG tracks.", |
| "cite_spans": [ |
| { |
| "start": 15, |
| "end": 32, |
| "text": "(Ng et al., 2019)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "We use a two-stage model from (Puduppully et al., 2019) for NLG tracks. The English model was trained with the pretrained weights provided by the author and the German model was trained only on the Rotowire English-German dataset.", |
| "cite_spans": [ |
| { |
| "start": 30, |
| "end": 55, |
| "text": "(Puduppully et al., 2019)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "NCP+CC:", |
| "sec_num": null |
| }, |
| { |
| "text": "One team participated in the task, who focused on the German-English MT track of the task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Submitted Systems", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Team FJWU developed a system around Transformer-based sequence-to-sequence model. Additionally, the model employed hierarchical attention following (Miculicich et al., 2018) for both encoder and decoder to account for the document-level context. The system was trained in a two-stage process, where a base (sentence-level) NMT model was trained followed by the training of hierarchical attention networks component. To handle the scarcity of in-domain translation data, they experimented with upsizing the in-domain data up to three times to construct training data. Their ablation experiments showed that this upsizing of in-domain data is effective at increasing the BLEU score.", |
| "cite_spans": [ |
| { |
| "start": 148, |
| "end": 173, |
| "text": "(Miculicich et al., 2018)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Submitted Systems", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "We show the MT track results in Table 3 . We confirm that the use of both document-level models and in-domain data helps achieve better BLEU score, which has also been shown from the last workshop (Hayashi et al., 2019 Table 3 : DGT results on the MT track (De \u2192 En).", |
| "cite_spans": [ |
| { |
| "start": 197, |
| "end": 218, |
| "text": "(Hayashi et al., 2019", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 32, |
| "end": 39, |
| "text": "Table 3", |
| "ref_id": null |
| }, |
| { |
| "start": 219, |
| "end": 226, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "Machine translation systems are typically trained to produce a single output, but in certain cases, it is desirable to have many possible translations of a given input text. At Duolingo, the world's largest online language-learning platform, 7 we grade translationbased challenges with sets of human-curated acceptable translation options. Given the many ways of expressing a piece of text, these sets are slow to create, and may be incomplete. This process is ripe for improvement with the aid of rich multi-output translation and paraphrase systems. To this end, we introduce a shared task called STAPLE: Simultaneous Translation and Paraphrasing for Language Education (Mayhew et al., 2020) .", |
| "cite_spans": [ |
| { |
| "start": 672, |
| "end": 693, |
| "text": "(Mayhew et al., 2020)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "STAPLE Task", |
| "sec_num": "5" |
| }, |
| { |
| "text": "In this shared task, participants are given a training set consisting of 2500 to 4000 English sentences (or prompts), each of which is paired with a list of comprehensive translations in the target language, weighted and ordered by normalized learner response frequency. At test time, participants are given 500 English prompts, and are required to produce the set of comprehensive translations for each prompt. We also provide a high-quality automatic reference translation for each prompt, in the event that a participant wants to work on paraphrase-only approaches. The target languages were Hungarian, Japanese, Korean, Portuguese, and Vietnamese.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task Description", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "There were 20 participants who submitted to the development phase, 14 participants who submitted to the test phase, and 11 participants who submitted system description papers. Submission models largely consisted of high-quality machine translation systems fine-tuned on in-domain shared task data from Duolingo, with different tricks for training, ensembling, and output filtering.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Submitted Systems", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "In the test phase, three teams submitted to all 5 language tracks, and one team submitted to two tracks (Portuguese, and Hungarian). Of the remaining single-language submissions, Portuguese and Japanese were the most popular. In these single language submissions, teams did not tend to take language-specific approaches.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Submitted Systems", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Submission performance varied widely, but nearly all submissions improved significantly over organizer-provided baselines. The top submissions have comparable scores to taking the top 5 translations from each gold translation set.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Techniques popular among the more successful teams included weighting of training data according to learner response frequency, and classifierbased output filtering. Interestingly, techniques such as diverse beam search and beam reranking did not appear to improve results, despite their close relevance to the task. For more details and analysis, see Mayhew et al. (2020) .", |
| "cite_spans": [ |
| { |
| "start": 352, |
| "end": 372, |
| "text": "Mayhew et al. (2020)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "This paper summarized the results of the Fourth Workshop on Neural Generation and Translation, where we saw a number of research advances. Particularly, this year introduced a more rigorous efficiency task, and a new STAPLE task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "https://edin.ac/2TSPnC7 4 https://edin.ac/2ywYp01 5 https://edin.ac/3bWrBes", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "www.duolingo.com", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "The efficiency shared task was partly funded from European Union's Horizon 2020 research and innovation programme under grant agreement No 825303 (Bergamot) and by the Connecting Europe Facility (CEF) -Telecommunications from the project No 2019-EU-IA-0045 (User-focused Marian). This work represents the authors' opinions, not necessarily those of the European Union.We thank Amazon Web Services for its gift of credits to support the efficiency shared task evaluation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": "7" |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Neural machine translation by jointly learning to align and translate", |
| "authors": [ |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "3rd International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Ben- gio. 2015. Neural machine translation by jointly learning to align and translate. In 3rd Inter- national Conference on Learning Representations, ICLR 2015.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Findings of the 2019 conference on machine translation (wmt19)", |
| "authors": [ |
| { |
| "first": "Lo\u00efc", |
| "middle": [], |
| "last": "Barrault", |
| "suffix": "" |
| }, |
| { |
| "first": "Ond\u0159ej", |
| "middle": [], |
| "last": "Bojar", |
| "suffix": "" |
| }, |
| { |
| "first": "Marta", |
| "middle": [ |
| "R" |
| ], |
| "last": "Costa-Juss\u00e0", |
| "suffix": "" |
| }, |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Federmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Fishel", |
| "suffix": "" |
| }, |
| { |
| "first": "Yvette", |
| "middle": [], |
| "last": "Graham", |
| "suffix": "" |
| }, |
| { |
| "first": "Barry", |
| "middle": [], |
| "last": "Haddow", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthias", |
| "middle": [], |
| "last": "Huck", |
| "suffix": "" |
| }, |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Shervin", |
| "middle": [], |
| "last": "Malmasi", |
| "suffix": "" |
| }, |
| { |
| "first": "Christof", |
| "middle": [], |
| "last": "Monz", |
| "suffix": "" |
| }, |
| { |
| "first": "Mathias", |
| "middle": [], |
| "last": "M\u00fcller", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Fourth Conference on Machine Translation", |
| "volume": "2", |
| "issue": "", |
| "pages": "1--61", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lo\u00efc Barrault, Ond\u0159ej Bojar, Marta R. Costa-juss\u00e0, Christian Federmann, Mark Fishel, Yvette Gra- ham, Barry Haddow, Matthias Huck, Philipp Koehn, Shervin Malmasi, Christof Monz, Mathias M\u00fcller, Santanu Pal, Matt Post, and Marcos Zampieri. 2019. Findings of the 2019 conference on machine trans- lation (wmt19). In Proceedings of the Fourth Con- ference on Machine Translation (Volume 2: Shared Task Papers, Day 1), pages 1-61, Florence, Italy. As- sociation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1423" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Un- derstanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Findings of the third workshop on neural generation and translation", |
| "authors": [ |
| { |
| "first": "Hiroaki", |
| "middle": [], |
| "last": "Hayashi", |
| "suffix": "" |
| }, |
| { |
| "first": "Yusuke", |
| "middle": [], |
| "last": "Oda", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Birch", |
| "suffix": "" |
| }, |
| { |
| "first": "Ioannis", |
| "middle": [], |
| "last": "Konstas", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Finch", |
| "suffix": "" |
| }, |
| { |
| "first": "Minh-Thang", |
| "middle": [], |
| "last": "Luong", |
| "suffix": "" |
| }, |
| { |
| "first": "Graham", |
| "middle": [], |
| "last": "Neubig", |
| "suffix": "" |
| }, |
| { |
| "first": "Katsuhito", |
| "middle": [], |
| "last": "Sudoh", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 3rd Workshop on Neural Generation and Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "1--14", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D19-5601" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hiroaki Hayashi, Yusuke Oda, Alexandra Birch, Ioan- nis Konstas, Andrew Finch, Minh-Thang Luong, Graham Neubig, and Katsuhito Sudoh. 2019. Find- ings of the third workshop on neural generation and translation. In Proceedings of the 3rd Workshop on Neural Generation and Translation, pages 1-14, Hong Kong. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Recurrent continuous translation models", |
| "authors": [ |
| { |
| "first": "Nal", |
| "middle": [], |
| "last": "Kalchbrenner", |
| "suffix": "" |
| }, |
| { |
| "first": "Phil", |
| "middle": [], |
| "last": "Blunsom", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nal Kalchbrenner and Phil Blunsom. 2013. Recurrent continuous translation models. In Proceedings of EMNLP.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "ROUGE: A Package for Automatic Evaluation of Summaries", |
| "authors": [ |
| { |
| "first": "Chin-Yew", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Text Summarization Branches Out", |
| "volume": "", |
| "issue": "", |
| "pages": "74--81", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chin-Yew Lin. 2004. ROUGE: A Package for Auto- matic Evaluation of Summaries. In Text Summariza- tion Branches Out, pages 74-81, Barcelona, Spain. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Simultaneous translation and paraphrase for language education", |
| "authors": [ |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Mayhew", |
| "suffix": "" |
| }, |
| { |
| "first": "Klinton", |
| "middle": [], |
| "last": "Bicknell", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Brust", |
| "suffix": "" |
| }, |
| { |
| "first": "Bill", |
| "middle": [], |
| "last": "Mcdowell", |
| "suffix": "" |
| }, |
| { |
| "first": "Will", |
| "middle": [], |
| "last": "Monroe", |
| "suffix": "" |
| }, |
| { |
| "first": "Burr", |
| "middle": [], |
| "last": "Settles", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the ACL Workshop on Neural Generation and Translation (WNGT). ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stephen Mayhew, Klinton Bicknell, Chris Brust, Bill McDowell, Will Monroe, and Burr Settles. 2020. Si- multaneous translation and paraphrase for language education. In Proceedings of the ACL Workshop on Neural Generation and Translation (WNGT). ACL.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Document-level neural machine translation with hierarchical attention networks", |
| "authors": [ |
| { |
| "first": "Lesly", |
| "middle": [], |
| "last": "Miculicich", |
| "suffix": "" |
| }, |
| { |
| "first": "Dhananjay", |
| "middle": [], |
| "last": "Ram", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikolaos", |
| "middle": [], |
| "last": "Pappas", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Henderson", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2947--2954", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D18-1325" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lesly Miculicich, Dhananjay Ram, Nikolaos Pappas, and James Henderson. 2018. Document-level neu- ral machine translation with hierarchical attention networks. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Process- ing, pages 2947-2954, Brussels, Belgium. Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Facebook FAIR's WMT19 news translation task submission", |
| "authors": [ |
| { |
| "first": "Nathan", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyra", |
| "middle": [], |
| "last": "Yee", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexei", |
| "middle": [], |
| "last": "Baevski", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Auli", |
| "suffix": "" |
| }, |
| { |
| "first": "Sergey", |
| "middle": [], |
| "last": "Edunov", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Fourth Conference on Machine Translation", |
| "volume": "2", |
| "issue": "", |
| "pages": "314--319", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W19-5333" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nathan Ng, Kyra Yee, Alexei Baevski, Myle Ott, Michael Auli, and Sergey Edunov. 2019. Facebook FAIR's WMT19 news translation task submission. In Proceedings of the Fourth Conference on Ma- chine Translation (Volume 2: Shared Task Papers, Day 1), pages 314-319, Florence, Italy. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Bleu: a method for automatic evaluation of machine translation", |
| "authors": [ |
| { |
| "first": "Kishore", |
| "middle": [], |
| "last": "Papineni", |
| "suffix": "" |
| }, |
| { |
| "first": "Salim", |
| "middle": [], |
| "last": "Roukos", |
| "suffix": "" |
| }, |
| { |
| "first": "Todd", |
| "middle": [], |
| "last": "Ward", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei-Jing", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "311--318", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/1073083.1073135" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: a method for automatic eval- uation of machine translation. In Proceedings of the 40th Annual Meeting of the Association for Com- putational Linguistics, pages 311-318, Philadelphia, Pennsylvania, USA. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "A call for clarity in reporting BLEU scores", |
| "authors": [ |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Post", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Third Conference on Machine Translation: Research Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "186--191", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W18-6319" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matt Post. 2018. A call for clarity in reporting BLEU scores. In Proceedings of the Third Conference on Machine Translation: Research Papers, pages 186- 191, Belgium, Brussels. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Data-to-text generation with content selection and planning", |
| "authors": [ |
| { |
| "first": "Ratish", |
| "middle": [], |
| "last": "Puduppully", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Dong", |
| "suffix": "" |
| }, |
| { |
| "first": "Mirella", |
| "middle": [], |
| "last": "Lapata", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
| "volume": "33", |
| "issue": "", |
| "pages": "6908--6915", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ratish Puduppully, Li Dong, and Mirella Lapata. 2019. Data-to-text generation with content selection and planning. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pages 6908-6915.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Sequence to sequence learning with neural networks", |
| "authors": [ |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc V", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "3104--3112", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ilya Sutskever, Oriol Vinyals, and Quoc V Le. 2014. Sequence to sequence learning with neural networks. In Advances in neural information processing sys- tems, pages 3104-3112.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Challenges in Data-to-Document Generation", |
| "authors": [ |
| { |
| "first": "Sam", |
| "middle": [], |
| "last": "Wiseman", |
| "suffix": "" |
| }, |
| { |
| "first": "Stuart", |
| "middle": [], |
| "last": "Shieber", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Rush", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2253--2263", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sam Wiseman, Stuart Shieber, and Alexander Rush. 2017. Challenges in Data-to-Document Generation. In Proceedings of the 2017 Conference on Empiri- cal Methods in Natural Language Processing, pages 2253-2263.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "Performance of Efficiency Task Submissions.", |
| "uris": null, |
| "num": null, |
| "type_str": "figure" |
| }, |
| "TABREF0": { |
| "content": "<table><tr><td>Corpus</td><td>Lines</td><td colspan=\"2\">Words Characters</td></tr><tr><td>EMEA</td><td colspan=\"2\">759876 13152485</td><td>86584513</td></tr><tr><td>Tatoeba</td><td>214943</td><td>1398154</td><td>7303297</td></tr><tr><td>Federal</td><td>785</td><td>13458</td><td>87724</td></tr><tr><td>WMT10</td><td>2489</td><td>54021</td><td>328648</td></tr><tr><td>WMT11</td><td>3003</td><td>65829</td><td>396884</td></tr><tr><td>WMT13</td><td>3000</td><td>56089</td><td>332972</td></tr><tr><td>WMT14</td><td>2737</td><td>54268</td><td>329121</td></tr><tr><td>WMT15</td><td>2169</td><td>40771</td><td>241016</td></tr><tr><td>WMT16</td><td>2999</td><td>56789</td><td>337711</td></tr><tr><td>WMT17</td><td>3004</td><td>56435</td><td>336817</td></tr><tr><td>WMT18</td><td>2998</td><td>58628</td><td>351779</td></tr><tr><td>WMT19</td><td>1997</td><td>42034</td><td>249742</td></tr><tr><td>Total</td><td colspan=\"2\">1000000 15048961</td><td>96880224</td></tr></table>", |
| "type_str": "table", |
| "text": "on WMT test sets from 2010-2019, excluding 2012.2 The 2012 test set The WMT 2020 test set was not yet available and others were out of the domain the systems were trained for.", |
| "html": null, |
| "num": null |
| }, |
| "TABREF1": { |
| "content": "<table/>", |
| "type_str": "table", |
| "text": "Size of corpora in the efficiency task input.", |
| "html": null, |
| "num": null |
| }, |
| "TABREF3": { |
| "content": "<table><tr><td>5</td></tr></table>", |
| "type_str": "table", |
| "text": "Submissions to the efficiency shared task sorted in decreasing order of WMT1* BLEU. Systems translated 1,000,000 lines with 15,048,961 space-separated words. GPU submissions including host CPU memory usage. GPU RAM is shown above.", |
| "html": null, |
| "num": null |
| }, |
| "TABREF5": { |
| "content": "<table><tr><td>System</td><td colspan=\"2\">BLEU Type</td></tr><tr><td>FJWU</td><td>45.04</td><td>C</td></tr><tr><td colspan=\"2\">FairSeq-19 42.91</td><td>C</td></tr><tr><td>).</td><td/><td/></tr></table>", |
| "type_str": "table", |
| "text": "Model identifier: transformer.wmt19.en-de, transformer.wmt19.de-en.", |
| "html": null, |
| "num": null |
| } |
| } |
| } |
| } |