| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T07:55:11.196796Z" |
| }, |
| "title": "Best-First Beam Search", |
| "authors": [ |
| { |
| "first": "Clara", |
| "middle": [], |
| "last": "Meister", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "ETH Z\u00fcrich Johns Hopkins University * University of Cambridge", |
| "location": {} |
| }, |
| "email": "clara.meister@inf.ethz.ch" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Vieira", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "ETH Z\u00fcrich Johns Hopkins University * University of Cambridge", |
| "location": {} |
| }, |
| "email": "tim.vieira@gmail.com" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Cotterell", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "ETH Z\u00fcrich Johns Hopkins University * University of Cambridge", |
| "location": {} |
| }, |
| "email": "ryan.cotterell@inf.ethz.ch" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Decoding for many NLP tasks requires an effective heuristic algorithm for approximating exact search because the problem of searching the full output space is often intractable, or impractical in many settings. The default algorithm for this job is beam search-a pruned version of breadth-first search. Quite surprisingly, beam search often returns better results than exact inference due to beneficial search bias for NLP tasks. In this work, we show that the standard implementation of beam search can be made up to 10x faster in practice. Our method assumes that the scoring function is monotonic in the sequence length, which allows us to safely prune hypotheses that cannot be in the final set of hypotheses early on. We devise effective monotonic approximations to popular nonmonotonic scoring functions, including length normalization and mutual information decoding. Lastly, we propose a memory-reduced variant of best-first beam search, which has a similar beneficial search bias in terms of downstream performance, but runs in a fraction of the time.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Decoding for many NLP tasks requires an effective heuristic algorithm for approximating exact search because the problem of searching the full output space is often intractable, or impractical in many settings. The default algorithm for this job is beam search-a pruned version of breadth-first search. Quite surprisingly, beam search often returns better results than exact inference due to beneficial search bias for NLP tasks. In this work, we show that the standard implementation of beam search can be made up to 10x faster in practice. Our method assumes that the scoring function is monotonic in the sequence length, which allows us to safely prune hypotheses that cannot be in the final set of hypotheses early on. We devise effective monotonic approximations to popular nonmonotonic scoring functions, including length normalization and mutual information decoding. Lastly, we propose a memory-reduced variant of best-first beam search, which has a similar beneficial search bias in terms of downstream performance, but runs in a fraction of the time.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Beam search is a common heuristic algorithm for decoding structured predictors (e.g., neural machine translation models and transition-based parsers). Because of the widespread adoption of recurrent neural networks and other non-Markov models, traditional dynamic programming solutions, such as the Viterbi algorithm (Viterbi, 1967) , are prohibitively inefficient; this makes beam search a common component of many stateof-the-art NLP systems. Despite offering no formal guarantee of finding the highest-scoring hypothesis under the model, beam search yields impressive performance on a variety of tasks-unexpectedly providing a beneficial search bias over exact search for many tasks (Stahlberg and Byrne, 2019) .", |
| "cite_spans": [ |
| { |
| "start": 317, |
| "end": 332, |
| "text": "(Viterbi, 1967)", |
| "ref_id": "BIBREF42" |
| }, |
| { |
| "start": 686, |
| "end": 713, |
| "text": "(Stahlberg and Byrne, 2019)", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Within NLP, most research on beam search has focused on altering the log-probability scoring function to return improved results, for example, higher BLEU scores Murray and Chiang, 2018; Shu and Nakayama, 2018; Yang et al., 2018) or a more diverse set of outputs (Vijayakumar et al., 2016) . However, little work has been done to speed up beam search itself. Filling this gap, this paper focuses on reformulating beam search in order to make it faster. We propose best-first beam search, a prioritized version of traditional beam search that is up to an order of magnitude faster in practice while still returning the same set of results. We additionally discuss an even faster heuristic version of our algorithm that further limits the number of candidate solutions, leading to a smaller memory footprint while still finding good solutions.", |
| "cite_spans": [ |
| { |
| "start": 162, |
| "end": 186, |
| "text": "Murray and Chiang, 2018;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 187, |
| "end": 210, |
| "text": "Shu and Nakayama, 2018;", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 211, |
| "end": 229, |
| "text": "Yang et al., 2018)", |
| "ref_id": "BIBREF45" |
| }, |
| { |
| "start": 263, |
| "end": 289, |
| "text": "(Vijayakumar et al., 2016)", |
| "ref_id": "BIBREF40" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Concretely, we offer a novel interpretation of beam search as an agenda-based algorithm where traditional beam search is recovered by utilizing a length-based prioritization scheme. We prove that a specific best-first prioritization scheme, as in classic A * search (Hart et al., 1968) , allows for the elimination of paths that will necessarily fall off the beam; for many scoring functions, including standard log-probability scoring, we can still guarantee the same k hypotheses as traditional beam search are returned. Indeed, our algorithm returns beam search's top hypothesis the first time it encounters a complete hypothesis, allowing the program to stop early. Further, we discuss the application of best-first beam search to several popular scoring functions in the literature Li et al., 2016) ; this demonstrates that we have a general framework for adapting a variety of rescoring methods and alternate objectives to work with our algorithm.", |
| "cite_spans": [ |
| { |
| "start": 266, |
| "end": 285, |
| "text": "(Hart et al., 1968)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 787, |
| "end": 803, |
| "text": "Li et al., 2016)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Empirically, we compare best-first beam search to ordinary beam search on two NLP sequence-to-sequence tasks: neural machine translation (NMT) and abstractive summarization (AS). On NMT, we find that our algorithm achieves roughly a 30% speed-up over traditional beam search with increased gains for larger beams (e.g., \u2248 10x for a beam of 500). We find similar results hold for AS. Finally, we show that our memory-reduced version, which limits the number of active hypotheses, leads to additional speed-ups over best-first beam search across beam sizes while maintaining similar BLEU scores.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "A core operation in structured prediction models is the determination of the highest-scoring output for a given input under a learned scoring model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Transduction", |
| "sec_num": "2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "y def = arg max y\u2208Y(x) score(x, y)", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Sequence Transduction", |
| "sec_num": "2" |
| }, |
| { |
| "text": "where x is an input and Y(x) is a set of well-formed outputs for the input. An important example of (1) is maximum a posteriori (MAP),", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Transduction", |
| "sec_num": "2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "y MAP def = arg max y\u2208Y(x) p(y | x)", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Sequence Transduction", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Our work focuses on sequence-to-sequence transduction: predicting an output sequence given an input sequence. One such task is machine translation, wherein a source-language sentence is mapped (''transduced'') to a target-language sentence. While our exposition focuses on sequenceto-sequence prediction, our algorithms are directly applicable to any sequential structured prediction model, such as transition-based parsers (Nivre et al., 2008) and sequence taggers (McCallum et al., 2000; Lafferty et al., 2001) . . . . , x N x be an input sequence of length N x and, likewise, let y = y 1 , . . . , y N y be an output sequence of length N y . Each y t is an element of V, the set of output tokens. Finally, let Y(x) be the set of all valid output sequences (i.e., complete hypotheses). For the task of language generation, which we focus on experimentally, this set is defined as", |
| "cite_spans": [ |
| { |
| "start": 424, |
| "end": 444, |
| "text": "(Nivre et al., 2008)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 466, |
| "end": 489, |
| "text": "(McCallum et al., 2000;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 490, |
| "end": 512, |
| "text": "Lafferty et al., 2001)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 515, |
| "end": 526, |
| "text": ". . . , x N", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Transduction", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Notation. Let x = x 1 ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Transduction", |
| "sec_num": "2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "Y(x) def = {BOS \u2022 v \u2022 EOS | v \u2208 V <n max }", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Sequence Transduction", |
| "sec_num": "2" |
| }, |
| { |
| "text": "where \u2022 is string concatenation and V <n max (x) is the set of all subsets of V of size < n max (x). In words, every valid sequence begins and ends with distinguished tokens (BOS and EOS, respectively). 1 Furthermore, each sequence has at most length n max (x)-which is typically dependent on x-a restriction we impose to ensure termination. Some applications may require a stronger coupling between Y(x) and x (e.g., |x| = |y|). We drop the dependence of Y and n max on x when it is clear from context.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Transduction", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Scoring. We consider a general additively decomposable scoring model of the form", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Transduction", |
| "sec_num": "2" |
| }, |
| { |
| "text": "score(x, y) = N y t=1 score(x, y <t \u2022 y t ) (4)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Transduction", |
| "sec_num": "2" |
| }, |
| { |
| "text": "This framework covers a variety of modeling methodologies including probabilistic transducers (both globally and locally normalized) and nonprobabilistic models such as maximum-margin techniques (Taskar et al., 2004) . Most importantly, (4) covers MAP decoding (2) of neural sequenceto-sequence models\u00e0 la Sutskever et al. 2014: 2", |
| "cite_spans": [ |
| { |
| "start": 195, |
| "end": 216, |
| "text": "(Taskar et al., 2004)", |
| "ref_id": "BIBREF35" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Transduction", |
| "sec_num": "2" |
| }, |
| { |
| "text": "score s2s (x, y <t \u2022 y t ) = log p(y t | y <t , x) (5)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Transduction", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We note that (5) is the scoring function used for decoding many language generation models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Transduction", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Beam search. The worst-case running time of exactly computing (1) is exponential in n max ; namely, O(|V| n max ). 3 Beam search is a commonly used approximation to (1) in NMT and language generation tasks. It is used in many (if not most) state-of-the-art NLP systems Serban et al., 2017; Edunov et al., 2018; Yang et al., 2019) . Beam search may be understood as a pruned version of the classic path-search algorithm, breadth-first search (BFS), where the breadth is narrowed to the beam size k. Pseudocode is given in (1). Although, beam search does not solve (1) exactly, it is a surprisingly useful approximation for NLP models. In many settings, beam 1 BOS and EOS are typically members of V. Often, EOS counts towards the n max length limit while BOS does not. This is reflected in (3).", |
| "cite_spans": [ |
| { |
| "start": 269, |
| "end": 289, |
| "text": "Serban et al., 2017;", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 290, |
| "end": 310, |
| "text": "Edunov et al., 2018;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 311, |
| "end": 329, |
| "text": "Yang et al., 2019)", |
| "ref_id": "BIBREF46" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Transduction", |
| "sec_num": "2" |
| }, |
| { |
| "text": "2 To see why, apply exp (an order-preserving transforma- ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Transduction", |
| "sec_num": "2" |
| }, |
| { |
| "text": "tion): exp(score s2s (x, y)) = exp Ny t=1 log p(y t | y <t , x) = Ny t=1 p(y t | y <t , x) = p(y | x).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Transduction", |
| "sec_num": "2" |
| }, |
| { |
| "text": "for y \u2208 V : 9: s \u2190 score(x, y \u2022 y)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Transduction", |
| "sec_num": "2" |
| }, |
| { |
| "text": "10:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Transduction", |
| "sec_num": "2" |
| }, |
| { |
| "text": "B.add( s, y \u2022 y )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Transduction", |
| "sec_num": "2" |
| }, |
| { |
| "text": "11:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Transduction", |
| "sec_num": "2" |
| }, |
| { |
| "text": "B t \u2190 B.top(k) 12: return B.max()", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Transduction", |
| "sec_num": "2" |
| }, |
| { |
| "text": "search outperforms exact methods in terms of downstream evaluation (Koehn and Knowles, 2017; Stahlberg and Byrne, 2019) . For the remainder of this paper, we will pivot our attention away from exact solutions to (1) to exact solutions to the beam search output.", |
| "cite_spans": [ |
| { |
| "start": 67, |
| "end": 92, |
| "text": "(Koehn and Knowles, 2017;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 93, |
| "end": 119, |
| "text": "Stahlberg and Byrne, 2019)", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Transduction", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Definition 2.1. k-optimal hypothesis. We say that a hypothesis is k-optimal if it is the top hypothesis returned by beam search with beam size k.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Transduction", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We develop a meta-algorithm that is parameterized by several choice points. Our general search algorithm for decoding (Alg. 2) takes an arbitrary prioritization function, stopping criterion, and search heuristic. With certain values of these attributes, we recover many common search algorithms: greedy search, beam search, best-first search (Dijkstra, 1959) , and A * search (Hart et al., 1968) . We propose an alternate prioritization function for beam search that allows for faster decoding while still returning the same k-optimal set of hypotheses. 4 Often, the score function is additively decomposable in t, such as (5). Implementations can exploit this fact to make each score evaluation (line 9) O(1) rather than O(t). We did not make this implementation detail explicit in Alg. 1 or Alg. 2 for generality and simplicity.", |
| "cite_spans": [ |
| { |
| "start": 342, |
| "end": 358, |
| "text": "(Dijkstra, 1959)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 376, |
| "end": 395, |
| "text": "(Hart et al., 1968)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 554, |
| "end": 555, |
| "text": "4", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A * Beam Search", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Algorithm 2 General decoding scheme. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A * Beam Search", |
| "sec_num": "3" |
| }, |
| { |
| "text": "for y \u2208 V : 13: s \u2190 score(x, y \u2022 y) 14: s h \u2190 s+ h(x, y \u2022 y) 15: Q.push( s h , y \u2022 y )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A * Beam Search", |
| "sec_num": "3" |
| }, |
| { |
| "text": "16: return Q.pop() if not Q.empty() else null", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A * Beam Search", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Here we review the components of our meta algorithm (the highlighted sections in Alg. 2) that can be varied to recover different search strategies: 1 : y \u00d7 y \u2192 {True, False}. A priority queue Q maintains the set of active hypotheses. Elements in this set are ordered according to a generic comparator . When its peek() (or pop()) methods are called, the first element ordered by is returned (or returned and removed). Recovering Beam Search. To recover beam search from Algorithm 2, we use the choice points from Table 1 . Explicitly, the comparator prioritizes hypotheses from earlier time steps first, but breaks ties with the hypotheses' scores under the model. We note that while the standard algorithm for beam search does not prioritize by score within a time step, variations of the algorithm use this strategy so they can use early-stopping strategies (Klein et al., 2017; . Beam search terminates once either all hypotheses end in EOS or the queue is empty (i.e., when the k beams have been extended n max time steps but none end in EOS). In the second case, no complete hypothesis is found. Finally, choosing the heuristic h(x, y) = 0 makes the algorithm a case of standard best-first search. Note that, while standard beam search returns a set, Alg 2 only returns the k-optimal hypothesis. This behavior is sufficient for the majority of use cases for beam search. However, if the full set of k hypotheses is desired, the stopping criterion can be changed to evaluate true only when k hypotheses are complete. Under the other beam search settings, this would probably return the same set as beam search (see \u00a7 4.1).", |
| "cite_spans": [ |
| { |
| "start": 860, |
| "end": 880, |
| "text": "(Klein et al., 2017;", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 513, |
| "end": 520, |
| "text": "Table 1", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Choice Points of 2", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Beam Search Best-First Beam Search A * Beam Search 1 s h , y s h , y \u21d0\u21d2 |y| < |y| s h , y s h , y \u21d0\u21d2 s h > s h s h , y s h , y \u21d0\u21d2 s h >", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Choice Points of 2", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Breadth-First Search Best-First Search A * Search 1 s h , y s h , y \u21d0\u21d2 |y| < |y| s h , y s h , y \u21d0\u21d2 s h > s h s h , y s h , y \u21d0\u21d2 s h >", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Choice Points of 2", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Recovering A * . To recover the traditional A * search algorithm, we use the comparator that prioritizes hypotheses with a higher score first; ties are broken by hypothesis length. The algorithm terminates when the first item of Q contains an EOS. If we take k = \u221e, best-first beam search recovers A * . Any admissible heuristic may be used for h(x, y).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Choice Points of 2", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Definition 3.1. Admissible Heuristic. A heuristic h is admissible if it never overestimates the future cost-or underestimates the future reward-of continuing down a path.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Choice Points of 2", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "In its original form, A * search may traverse the entire O(|V| n max ) graph, which as discussed earlier, is intractable for many decoding problems. While standard beam search addresses this problem by limiting the search space, it still has computational inefficiencies-namely, we must analyze k hypotheses of a given length (i.e., time step), regardless of how poor their scores may already be, before considering longer hypotheses.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Best-First Beam Search", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "However, prioritization by length is not strictly necessary for finding a k-optimal hypothesis. As is done in A * , we can use score as the prioritization scheme and still guarantee optimality-or koptimality-of the paths returned by the algorithm.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Best-First Beam Search", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We define A * beam search as the A * algorithm where breadth is limited to size k. Further, we define best-first beam search as the case of A * beam search when no heuristic is used (see Table 1 for algorithm settings). This formulation has two large advantages over standard beam search: (1) we gain the ability to remove paths from the queue that are guaranteed to fall off the beam and (2) we can terminate the algorithm the first time a complete hypothesis is encountered. We can therefore reduce the computation required for decoding while still returning the same set of results.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 187, |
| "end": 194, |
| "text": "Table 1", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Best-First Beam Search", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The mathematical property that makes this short-circuiting of computation possible is the monotonicity of the scoring function. Note that not all scoring functions are monotonic, but many important ones are, including log-probability (5). We discuss effective approximations for popular non-monotonic scoring functions in \u00a7 5.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Best-First Beam Search", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Definition 3.2. Monotonicity. A scoring function score(\u2022, \u2022) is monotonic in t if for all x, y <t = y 1 . . . y t\u22121 , y t \u2208 V, 1 \u2264 t \u2264 n max score(x, y <t ) \u2265 score(x,y <t \u2022 y t )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Best-First Beam Search", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Clearly, (5) is a monotonic scoring function in t because score s2s \u2264 0, that is, the score of a partial hypothesis y <t can only decrease if we extend it by another symbol y t . This implies we can order our search according to score(x, y <t ) without fear of overlooking a hypothesis whose score would increase over time. Furthermore, once k hypotheses of a given length t have been evaluated, we no longer need to consider any hypothesis where |y| < t because such hypotheses would necessarily fall off the beam. We can therefore remove such hypotheses from the queue and avoid wasting computational power on their evaluation. We prove this formally in \u00a7 4.1.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Best-First Beam Search", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Another implication of the monotonicity property of score is that we may terminate bestfirst beam search once a hypothesis containing EOS is encountered (i.e., the end state is found). If the full set of k complete hypotheses is desired, then we simply continue until k hypotheses have reached EOS. We prove the k-optimality of these hypotheses under best-first beam search in \u00a7 4.1.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Best-First Beam Search", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Standard beam search forms a separate set of active hypotheses for each time step, that is, each B t is its own set. Once B t has been narrowed down to the top k, the previous B <t can be forgotten. However in best-first beam search, because hypotheses are not evaluated in order of time step, we may need to keep B t from several time steps at any given point.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Implementation Details", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "A naive implementation of best-first beam search is to keep a single priority queue with all the active hypotheses ordered by current score. However, each push to the queue would then require O(log(n max k|V|)) time. We can reduce this runtime by instead keeping a priority queue of beams, where the priority queue is ordered by the highest-scoring hypothesis from each beam. Further, each beam can be represented by a minmax queue (Atkinson et al., 1986) ; this allows us to limit the size of B t to k: we can check in O(1) time if a hypothesis is in the top-k before adding it to B t .", |
| "cite_spans": [ |
| { |
| "start": 432, |
| "end": 455, |
| "text": "(Atkinson et al., 1986)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Implementation Details", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "A potential inefficiency, which we avoid, comes from updating B t+1 , which we must do when evaluating a hypothesis from B t . Because all beams are stored in a queue, there is no guarantee of the location in the queue of B t+1 . To avoid O(n max ) lookup, we can keep a pointer to each beam, indexed by t making the lookup O(1). However, we acquire a O(log n max ) term to update the queue of beams as B t+1 may change priority.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Implementation Details", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Memory-Reduced Best-First Beam Search. A major drawback of the A * algorithm is its memory usage, which in the worst-case is O(b d ) for breadth width b and maximum depth d. In the A * formulation of beam search, where the breadth width is limited to the beam size, this amounts to worst-case O(k \u2022 n max ) memory usage, where standard beam search has O(k) memory usage. Whereas in many settings the multiplicative factor may be insignificant, for neural sequence models it can be prohibitive; this is due to the large amount of memory required to store each hypothesis (e.g., prior hidden states needed to compute subsequent scores for scoring functions parameterized by neural networks).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Implementation Details", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "We propose a variant of best-first beam search that limits memory usage, that is, the queue capacity. Specifically, if we reach the chosen queue capacity, we remove the worst scoring active hypothesis from the earliest active time step. This can easily be done in O(1) time given our pointer to each beam.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Implementation Details", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "We show the equivalence of the top hypothesis 6 returned by beam search and best-first beam search when score(\u2022, \u2022) is monotonically decreasing in t, length-based prioritization is used, and the beam size k is the same for both algorithms. Without loss of generality, we hold x constant in all the following proofs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Correctness", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Note that we take the terms pop and push from queue terminology. Specifically, ''popping a hypothesis'' refers to making it past line 7 of Alg. 2, where a hypothesis y is expanded by y t \u2208 V. In path search terminology, this would be equivalent to visiting a node and adding the edges from that node as potential paths to explore. Lastly, we refer to the priority queue used by beam search and best-first beam search as Q BS and Q A * , respectively.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Correctness", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Lemma 4.1. Best-first beam search evaluates all hypotheses of a given length t in order of their score.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Correctness", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Proof. We prove the lemma by induction. The lemma holds trivially for the base case of hypotheses of length 0 because the only hypothesis of length 0 is BOS . Now, by the inductive hypothesis, suppose Lemma 4.1 holds for all hypotheses of length < t. We will show it must also hold for hypotheses of length t. Consider two competing hypotheses: y = y <t \u2022 y t and y = y <t \u2022 y t . Note that |y <t | = |y <t | = t \u2212 1. Suppose score(x, y ) < score(x, y).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Correctness", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Case 1: score(x, y <t ) < score(x, y <t ). Then by induction, y <t popped first and y is pushed to Q before y . Because score(x, y ) < score(x, y), y will be popped before y .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Correctness", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Case 2: score(x, y <t ) < score(x, y <t ). Then by induction, y <t is popped first and y is added to Q before y. But, because score(x, y ) < score(x, y) \u2264 score(x, y <t ) by monotonicity, then y <t will be popped before y . Consequently, y will be pushed to Q before y is evaluated. By the rules of the priority queue y will be evaluated before y .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Correctness", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Case 3: score(x, y ) = score(x, y). The lemma holds if either y or y is popped first.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Correctness", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "By the principle of induction, Lemmma 4.1 holds for all t \u2208 N >0 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Correctness", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Lemma 4.2. The first hypothesis that best-first beam search pops that ends in EOS is k-optimal.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Correctness", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Proof. Let y be the first hypothesis popped by best-first beam search ending in EOS. By rules of the priority queue, no other active hypothesis has a higher score than y. Additionally, by monotonicity of the scoring function, no other hypothesis can subsequently have score greater than y. Therefore y must be k-optimal.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Correctness", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Lemma 4.3. If best-first beam search pops a hypothesis, then beam search necessarily pops that same hypothesis. Proof. We prove the lemma by induction on hypothesis length. The base case holds trivially: For hypotheses of length 0, both best-first beam search and beam search must pop the BOS as it is the only item in the queue after initialization.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Correctness", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "By the inductive hypothesis, suppose Lemma 4.3 holds for hypotheses of length < t. Suppose bestfirst beam search pops a hypothesis y = y <t \u2022 y t of length t.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Correctness", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Case 1: Best-first beam search pops k hypotheses of length t \u2212 1 before popping y, which is of length t. The sets of hypotheses of length t \u2212 1 that each algorithm pops are necessarily the same by the inductive hypothesis and the fact that they have the same cardinality. If best-first beam search pops y, which is of length t, then it must be in the top-k highest-scoring hypotheses of length t in Q A * by the rules of the priority queue. Consequently, it must be in the top-k in Q BS .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Correctness", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Case 2: Best-first beam search has popped fewer than k hypotheses of length t \u2212 1 before popping y. Then, all remaining hypotheses of length t \u2212 1 in Q A * must have score(x, y <t ) < score(x, y) by the rules of the priority queue. By the monotonicity of the score function, all extensions of those y <t will also have score(x, y <t \u2022 y t ) < score(x, y). Because none of y <t \u2022 y t has greater score than y, y must be in B t .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Correctness", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Corollary 4.3.1. Best-first beam search will never pop more hypotheses than beam search.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Correctness", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Theorem 4.4. Once best-first beam search has popped k hypotheses of length t, hypotheses from time steps < t do not need to be popped.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Correctness", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Proof. This follows from Lemma 4.1. If k hypotheses of length t have been popped, then these must be the top-k hypotheses from time step t. Therefore no hypothesis from time step < t that is still in Q A * would be in the top-k at time step t. Theorem 4.5. Let H BS and H A be the set of k hypotheses returned by beam search and best-first beam search, respectively. H BS = H A .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Correctness", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Proof. Because |H BS | = |H A | = k, we only need to show y \u2208 H BS =\u21d2 y \u2208 H A .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Correctness", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Suppose, by way of contradiction, there exists a hypothesis y \u2208 H BS such that y \u2208 H A . If y \u2208 H A then we must not pop the prefix y <t (where y = y <t \u2022 y t:|y| ) for some time step t < |y|.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Correctness", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Case 1: At some time step t + j (j \u2265 0), we pop k partial hypotheses {y", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Correctness", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "(1) \u2264t+j , . . . , y (k) \u2264t+j } where y \u2264t+j \u2208 {y (1) \u2264t+j , . . . , y (k)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Correctness", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u2264t+j }. By Lemma 4.1, it must be that score(x, y (i) \u2264t+j ) > score(x, y \u2264t+j ) \u2200i \u2208 1, . . . , k. This implies that for beam search, y \u2264t+j would not be in the top-k paths at time step t + j since by Lemma 4.3, paths {y", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Correctness", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "(1) \u2264t+j , . . . , y (k)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Correctness", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u2264t+j } would also be evaluated by beam search. Therefore y cannot be in H BS , which is a contradiction.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Correctness", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Case 2: For no time step t + j (j \u2265 0) do we pop k paths. This can only happen if the algorithm stops early, namely, we have found k complete hypotheses y (1) , . . . , y (k) . If this is the case, then by rules of the priority queue, each y (1) , . . . , y (k) must have score greater than score(x, y <t ). By monotonicity of the score function, score(x, y (i) ) > score(x, y). This implies y cannot be in H BS , which is a contradiction.", |
| "cite_spans": [ |
| { |
| "start": 171, |
| "end": 174, |
| "text": "(k)", |
| "ref_id": null |
| }, |
| { |
| "start": 258, |
| "end": 261, |
| "text": "(k)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Correctness", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Non-monotonic Scoring Functions. Nonmonotonic scoring functions (Definition 3.2) break the assumptions of \u00a7 4.1, in which case best-first beam search is not guaranteed to return a k-optimal hypothesis. However, when the scoring function is boundable from above, we can alter the original stopping criterion ( 2 in Alg. 2) such that k-optimality is again guaranteed.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Correctness", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Given our assumed restriction on the search space-namely, |y \u2208 Y(x)| \u2264 n max (x)-we can upper-bound the maximal score of any hypothesis under the scoring function in use. Formally, for any function score we have:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Correctness", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "stop(Q) \u21d0\u21d2 score(x,\u0177) \u2265 score(x, y ) + U(x, y ) \u2200y \u2208 Q", |
| "eq_num": "(6)" |
| } |
| ], |
| "section": "Correctness", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "where\u0177 is the best complete hypothesis found so far and U(x, y ) is the score functiondependent upper bound on how much the score of y can increase as y is expanded further. 7", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Correctness", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "In this situation, best-first beam search only terminates once no other hypothesis in Q can have a score greater than the best finished hypothesis. We note that Huang et al. (2017) use a similar scheme for optimal stopping with bounded length normalization. We discuss examples of non-monotonic scoring functions in \u00a7 5.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Correctness", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "A Note on Heuristics. Our analysis shows the equivalence of beam search and best-first beam search, that is, when h(x, y) = 0. The analysis does not hold for arbitrary admissible heuristics. A poor heuristic (e.g., one that grossly overestimates the future score of continuing down one path) may cause other items to be pruned from best-first beam search that otherwise would have remained on the beam in standard beam search.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Correctness", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Theorem 4.6. The runtime of best-first beam search is O(n max k (|V| log(k) + log(n max )))", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Runtime", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Proof. We pop at most n max \u2022 k items. Each pop requires us to push |V| items. Each push requires log(k) time when the priority queue is implemented with a min-max heap (Atkinson et al., 1986) and incrementally pruned so that it has no more than k items. After pushing those |V| items, we have to perform a percolation in the priority queue of priority queues, which requires log(n max ) time. This yields O(n max k (|V| log(k)+ log(n max ))) time.", |
| "cite_spans": [ |
| { |
| "start": 169, |
| "end": 192, |
| "text": "(Atkinson et al., 1986)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Runtime", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Theorem 4.7. The runtime of standard beam search is O(n max k |V| log(k)).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Runtime", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Proof. The proof is the same as Theorem 4.6, but we can forgo the percolation step in the queue of queues because standard beam search proceeds in order of hypothesis length. This yields O(n max k|V| log(k)).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Runtime", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Although the theoretical bound of best-first beam search has an additional log factor compared with standard beam search, we find this to be negligible in practice. Rather, we find number of calls to score, the scoring function under our model (e.g., a neural network), is often the bottleneck operation when decoding neural networks (see \u00a7 6 for empirical evidence). In terms of this metric, the beam search algorithm makes O(kn max ) calls to score, as score is called once for each active hypothesis in B and B may evolve for n max rounds. The worst-case number of calls to score will be the same as for beam search, which follows from Lemma 4.3.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Runtime", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Even before the findings of Stahlberg and Byrne (2019) , it was well known that the best-scoring hypothesis with respect to the traditional likelihood objective can be far from ideal in practice Murray and Chiang, 2018; Yang et al., 2018) . For language generation tasks specifically, the results returned by neural models using the standard scoring function are often short and default to high-frequency words (Vinyals and Le, 2015; Shen et al., 2016) .", |
| "cite_spans": [ |
| { |
| "start": 28, |
| "end": 54, |
| "text": "Stahlberg and Byrne (2019)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 195, |
| "end": 219, |
| "text": "Murray and Chiang, 2018;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 220, |
| "end": 238, |
| "text": "Yang et al., 2018)", |
| "ref_id": "BIBREF45" |
| }, |
| { |
| "start": 411, |
| "end": 433, |
| "text": "(Vinyals and Le, 2015;", |
| "ref_id": "BIBREF41" |
| }, |
| { |
| "start": 434, |
| "end": 452, |
| "text": "Shen et al., 2016)", |
| "ref_id": "BIBREF31" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scoring Functions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "To alleviate such problems, methods that revise hypothesis scores to incorporate preferences for longer, less repetitive, or more diverse options have been introduced and are often used in practice. While most such techniques change the scoring function such that it is no longer monotonic, we can still guarantee the k-optimality of the returned hypothesis for (upper) bounded scoring functions using the methods discussed in \u00a7 4.1. In the remainder of this section, we present alternate scoring schemes adapted to work with best-first beam search. Additionally, we present several heuristics which, while breaking the k-optimality guarantee, provide another set of decoding strategies worth exploring.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scoring Functions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Length Normalization. Length normalization is a widely used hypothesis scoring method that aims to counteract the propensity for shorter sequences to have higher scores under neural mod-els; this is done by normalizing scores by hypothesis length (see Murray and Chiang [2018] for more detail).", |
| "cite_spans": [ |
| { |
| "start": 252, |
| "end": 276, |
| "text": "Murray and Chiang [2018]", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scoring Functions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "For early stopping in beam search with length normalization, propose bounding the additive length reward as the minimum of a pre-determined optimal sequence length ratio r and the final sequence length N y :", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scoring Functions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "score LN (x, y) = score(x, y) + \u03b2 \u2022 min{r|x|, N y } (7)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scoring Functions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "where \u03b2 is the scaling parameter for the reward. We note, however, that the same can be done with the maximum sequence length n max such that the traditional length reward used by is recovered:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scoring Functions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "score LN (x, y) = score(x, y) + \u03b2 min{n max , N y } = score(x, y) + \u03b2N y", |
| "eq_num": "(8)" |
| } |
| ], |
| "section": "Scoring Functions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We formally propose two methods for length normalization. We use the scoring functions in 7or (8) with either: (1) the following heuristic:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scoring Functions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "h(x, y) = 0 for y.last () = EOS \u03b2 max{b \u2212 |y|, 0} for y.last () = EOS (9)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scoring Functions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "where b can be r|x| or n max ; 8 or (2) stopping criterion as in (6) albeit with scoring function score LN and upper-bound function:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scoring Functions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "U(x, y) = \u03b2 max{0, b \u2212 |y|}", |
| "eq_num": "(10)" |
| } |
| ], |
| "section": "Scoring Functions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Despite their similarities, these two methods are not guaranteed to return the same results. Whereas the second method will return the same k-optimal hypotheses as beam search, using a heuristic during pruned search means we can no longer guarantee the k-optimality of the results with respect to the scoring function as the heuristic may push hypotheses off of the beam. We present experimental results for both methods in \u00a7 6.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scoring Functions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Mutual Information. Maximum mutual information decoding (Li et al., 2016) aims to alleviate the inherent preference of neural models for highfrequency tokens when using the log-probability decoding objective. Rather than choosing the hypothesis y to maximize conditional probability with respect to the input x, we instead choose y to maximize pointwise mutual information (PMI):", |
| "cite_spans": [ |
| { |
| "start": 56, |
| "end": 73, |
| "text": "(Li et al., 2016)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scoring Functions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "PMI(x; y) = log p(x, y) p(x)p(y)", |
| "eq_num": "(11)" |
| } |
| ], |
| "section": "Scoring Functions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Note that (11) is equivalent to log p(y|x) p(y) , which can be rewritten as log p(y | x) \u2212 log p(y), making the objective additive and thus (11) can conform to (4).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scoring Functions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "From this last form, we can see how mutual information decoding penalizes high-frequency and generic outputs; the negative p(y) term, as Li et al. (2016) point out, acts as an ''anti-language model.'' One unfortunate side effect of this objective is that ungrammatical and nonsensical outputs, which have probabilities close to 0 under a language model like p(y), end up with high scores because of the second term in the score function. To address this problem, and to upper-bound the scoring function, we propose lower-bounding the language model term by a hyperparameter 1 \u2265 \u03b5 > 0. We additionally use the strength hyperparameter \u03bb employed by Li et al. (2016) :", |
| "cite_spans": [ |
| { |
| "start": 137, |
| "end": 153, |
| "text": "Li et al. (2016)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 647, |
| "end": 663, |
| "text": "Li et al. (2016)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scoring Functions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "score PMI (x, y) = log p(y | x) \u2212 \u03bb log max{p(y), \u03b5} (12)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scoring Functions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Similarly to our methods for length normalization, we can use the scoring function in (12) either with the heuristic:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scoring Functions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "h(x, y) = 0 for y.last () = EOS \u2212\u03bb log \u03b5(n max \u2212|y|) for y.last () = EOS", |
| "eq_num": "(13)" |
| } |
| ], |
| "section": "Scoring Functions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "or with stopping criterion as in (6) albeit with score PMI and upper-bound function:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scoring Functions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "U (x, y) = \u2212\u03bb log \u03b5(n max \u2212 |y|)", |
| "eq_num": "(14)" |
| } |
| ], |
| "section": "Scoring Functions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Because \u2212\u03bb log \u03b5 is the best possible score at any given time step, clearly we can bound the increase in score PMI by the above function. However, as with our length normalization strategy, we lose the koptimality guarantee with the heuristic method for mutual information decoding. We present experimental results for both methods in \u00a7 6.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scoring Functions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We run our algorithm on several language-related tasks that typically use beam search for decoding: NMT and AS. Specifically, experiments are performed on IWSLT'14 De-En (Cettolo et al., 2012) , WMT'17 De-En (Bojar et al., 2017) , MTTT Fr-En (Duh, 2018) , and CNN-DailyMail (Hermann et al., 2015) using both Transformers (Vaswani et al., 2017) and Convolutional sequence-to-sequence models (Gehring et al., 2017) . For reproducibility, we use the data pre-processing scripts provided by fairseq (Ott et al., 2019) and follow their methods for training sequence transduction models. Hyperparameters are set in accordance with previous works. Specifically, on IWSLT'14 and MTTT tasks, we follow the recommended Transformer settings for IWSLT'14 in fairseq, 9 which are based on Vaswani et al. (2017) and Gehring et al. (2017) . Hyperparameters for models trained on the WMT task are set following version 3 of the Tensor2Tensor toolkit (Vaswani et al., 2018) . We use byte-pair encoding (BPE; Sennrich et al. 2016) for all languages. Vocabulary sizes for WMT and IWSLT'14 are set from recommendations for the respective tasks in fairseq; for the MTTT tasks, vocabulary sizes are tuned on models trained with standard label-smoothing regularization. Similarly, the CNN/DailyMail dataset is pre-processed and uses BPE following the same steps as (Lewis et al., 2019) ; model hyperparameters are likewise copied. Details are available on fairseq's Web site. 10 We use BLEU (Papineni et al., 2002) (evaluated using SacreBLEU [Post, 2018] ) for MT metrics and ROUGE-L (Lin, 2004) for abstractive summarization metrics. We build our decoding framework in SGNMT. 11", |
| "cite_spans": [ |
| { |
| "start": 170, |
| "end": 192, |
| "text": "(Cettolo et al., 2012)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 208, |
| "end": 228, |
| "text": "(Bojar et al., 2017)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 242, |
| "end": 253, |
| "text": "(Duh, 2018)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 321, |
| "end": 343, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": null |
| }, |
| { |
| "start": 390, |
| "end": 412, |
| "text": "(Gehring et al., 2017)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 495, |
| "end": 513, |
| "text": "(Ott et al., 2019)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 776, |
| "end": 797, |
| "text": "Vaswani et al. (2017)", |
| "ref_id": null |
| }, |
| { |
| "start": 802, |
| "end": 823, |
| "text": "Gehring et al. (2017)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 934, |
| "end": 956, |
| "text": "(Vaswani et al., 2018)", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 1342, |
| "end": 1362, |
| "text": "(Lewis et al., 2019)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 1453, |
| "end": 1455, |
| "text": "10", |
| "ref_id": null |
| }, |
| { |
| "start": 1468, |
| "end": 1491, |
| "text": "(Papineni et al., 2002)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 1519, |
| "end": 1531, |
| "text": "[Post, 2018]", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 1561, |
| "end": 1572, |
| "text": "(Lin, 2004)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "6" |
| }, |
| { |
| "text": "In Table 2 , we report values as the average number of calls to the scoring function per input; we do not use wall-clock time as this is heavily dependent on hardware. See Fig. 1 Table 2 : Average number of calls (rounded to nearest whole digit) to score, the sequence transduction model, per generated sequence when using different decoding algorithms. Green percentages are performance improvements over standard beam search. Beam search (ES) refers to the OpenNMT early-stopping method (Klein et al., 2017) . All methods provably return the same solution and thus, evaluation metrics (in dark blue) for a given beam size are identical. experiments were run on. For reference, in our experiments, the scoring function took on average > 99% of the total computation time, even with larger beam sizes, when overhead of the search algorithm is most significant. We find that best-first (BF) beam search leads to significant speed-ups over both traditional beam search and beam search with early stopping, with a performance increase 12 of \u2248 8x for a beam size of 500. We likewise find that best-first beam search offers speed-ups over early stopping methods that are not guaranteed to return the same results as standard beam search (see Table 3 ).", |
| "cite_spans": [ |
| { |
| "start": 489, |
| "end": 509, |
| "text": "(Klein et al., 2017)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 3, |
| "end": 10, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 172, |
| "end": 178, |
| "text": "Fig. 1", |
| "ref_id": "FIGREF0" |
| }, |
| { |
| "start": 179, |
| "end": 186, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 1237, |
| "end": 1244, |
| "text": "Table 3", |
| "ref_id": "TABREF7" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Running Time", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "We experiment with both forms of length normalization presented in \u00a7 5 and provide results Bahdanau et al. (2015) and ''early'' refers to the stopping criterion of . Note that neither method is guaranteed to return the same result as standard beam search. Search error and performance increases are with respect to standard beam search.", |
| "cite_spans": [ |
| { |
| "start": 91, |
| "end": 113, |
| "text": "Bahdanau et al. (2015)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Length Normalization", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "in Table 4 . We find that both methods, that is, changing the stopping criterion and using a heuristic during search, provide improvements over baseline BLEU scores albeit with different hyperparameter settings; increases are similar to improvements reported by Murray and Chiang (2018) . Notably, using a heuristic causes a large percentage of search errors with respect to standard beam search using the same scoring function. However, the difference in results appears to be beneficial in terms of BLEU. ", |
| "cite_spans": [ |
| { |
| "start": 262, |
| "end": 286, |
| "text": "Murray and Chiang (2018)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 3, |
| "end": 10, |
| "text": "Table 4", |
| "ref_id": "TABREF9" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Length Normalization", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "We train a language model on the IWSLT dataset and use it to calculate p(y) from (12) as marginalizing over y is intractable (see Li et al. [2016] for further justification). We run experiments using both of the methods discussed in \u00a7 5 and present results in Table 5 . We find that both methods provide results of equivalent BLEU score compared with the baseline output, namely, results obtained with the unbounded PMI objective and beam search. Again, despite the high search error rate demonstrated by the heuristic method, evaluation metrics are still comparable.", |
| "cite_spans": [ |
| { |
| "start": 130, |
| "end": 146, |
| "text": "Li et al. [2016]", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 260, |
| "end": 267, |
| "text": "Table 5", |
| "ref_id": "TABREF11" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Mutual Information", |
| "sec_num": "6.3" |
| }, |
| { |
| "text": "We conduct a set of experiments where we limit total queue capacity to k\u2022\u03b3 for \u03b3 \u2208 {1, . . . , n max }, as described in \u00a7 3.3, and report the BLEU score of the resulting set of hypotheses. As shown in Table 6 , we find that restricting the queue capacity does not harm output quality and, additionally, leads to even greater runtime performance increase. For example, runtime for decoding of IWSLT'14 with a beam size of 10 can be improved by > 3x while returning results with better evaluation metrics. We find that improvements are even more pronounced for larger beam sizes. Across beam widths and tasks, we find that search error (with respect to standard beam search) is quite low for \u03b3 = 5. Additionally, for smaller \u03b3, the change in BLEU Note that \u03b3 = n max is the standard best-first beam search algorithm. Performance increases are over standard beam search. Search error is with respect to beam search with same beam width.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 201, |
| "end": 208, |
| "text": "Table 6", |
| "ref_id": "TABREF12" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Memory Usage", |
| "sec_num": "6.4" |
| }, |
| { |
| "text": "score demonstrates that search error in this context does not necessarily hurt the quality of results.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Memory Usage", |
| "sec_num": "6.4" |
| }, |
| { |
| "text": "Our work is most similar to that of Zhou and Hansen (2005) , who propose beam stack search. However, they are focused on exact inference and still evaluate hypotheses in breadth-first order.", |
| "cite_spans": [ |
| { |
| "start": 36, |
| "end": 58, |
| "text": "Zhou and Hansen (2005)", |
| "ref_id": "BIBREF47" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "7" |
| }, |
| { |
| "text": "Additionally, their algorithm requires O(n max k) memory; although best-first beam search has the same requirements, we introduce effective methods for reducing them, namely, memoryreduced best-first beam search. propose and prove the optimality of an early-stopping criterion for beam search. The authors find in practice though that reduction in computation from their algorithm was generally not significant. We build on this work and introduce additional methods for avoiding unnecessary computation. Our method leads to better performance, as shown in Table 2 . Klein and Manning (2003) use A * for PCFG parsing; however, they use the un-pruned version for exact search, which is not applicable for NMT or AS as the memory requirements of the algorithm are far too large for these tasks. Subsequently, Pauls and Klein (2009) provide a method for pruning this search algorithm, albeit using a threshold rather than explicitly limiting the state space. Huang et al. (2012) also adapt A * for a k-best decoding algorithm. Although their methods differ notably from ours, they likewise use pruning techniques that allow for substantial speedups.", |
| "cite_spans": [ |
| { |
| "start": 567, |
| "end": 591, |
| "text": "Klein and Manning (2003)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 807, |
| "end": 829, |
| "text": "Pauls and Klein (2009)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 956, |
| "end": 975, |
| "text": "Huang et al. (2012)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 557, |
| "end": 564, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "7" |
| }, |
| { |
| "text": "Stahlberg and Byrne (2019) create an exact inference algorithm for decoding and use it to analyze the output of neural NMT models. Whereas they likewise utilize the monotonicity of the scoring function to make their method tractable, they do not focus on speed or mimicking the results of standard beam search.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "7" |
| }, |
| { |
| "text": "We propose best-first beam search, an algorithm that allows for faster decoding while still guaranteeing k-optimality. We provide results on several sequence-to-sequence transduction tasks that show the speed-ups that our algorithm provides over standard beam search for decoding neural models. We adapt several popular alternate scoring functions to best-first beam search and provide a framework that can be used to adapt other scoring methods such as coverage normalization or diverse beam search (Vijayakumar et al., 2016) . We also provide a memory-reduced version of our algorithm, which returns competitive results in a fraction of the time needed for standard beam search.", |
| "cite_spans": [ |
| { |
| "start": 500, |
| "end": 526, |
| "text": "(Vijayakumar et al., 2016)", |
| "ref_id": "BIBREF40" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "8" |
| }, |
| { |
| "text": "This can be improved if, for example, score(\u2022, \u2022) admits a low-order Markov factorization(Viterbi, 1967;Vieira et al., 2016). We do not discuss that setting in this paper because it limits the scoring model's expressive power.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "stop(\u2022) : Collection y \u2192 {True, False}. The algorithm terminates according to configurable stopping criterion based on the current set of elements in Q.5 If the last token of y is the end symbol (e.g., EOS), then y is not expanded any further. One can either regard y as any other hypothesis albeit with y \u2022 y t = y or keep appending EOS (i.e., y \u2022 y t = y \u2022 EOS ) so that time step and length can be regarded as synonymous. We adopt the latter standard for comparability with subsequent algorithms.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "k \u2208 N >0 . Only k paths of a given length are considered. If the algorithm has already encountered k paths of a given length, subsequent paths of that length are not evaluated. If we take k = \u221e, we recover unpruned search algorithms.4 h(\u2022, \u2022) : x \u00d7 y \u2192 R. A heuristic functionh(x, y) can be used during search to change the priority in which paths are evaluated. We note that with pruning, a heuristic may change the value of the k-optimal hypothesis (see \u00a7 4.1).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Best-first beam search is guaranteed to return the same set of k hypotheses as beam search. We include the proof for only the top hypothesis for simplicity. The proof for set equality follows naturally.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "For monotonic scoring functions, we have U (x, y ) = 0.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We enforce r|x| < n max .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://github.com/pytorch/fairseq/tree /master/examples/translation.10 https://github.com/pytorch/fairseq/blob /master/examples/bart/README.cnn.md.11 https://github.com/ucam-smt/sgnmt.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Min-max heaps and generalized priority queues", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [ |
| "D" |
| ], |
| "last": "Atkinson", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [ |
| "R" |
| ], |
| "last": "Sack", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Santoro", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Strothotte", |
| "suffix": "" |
| } |
| ], |
| "year": 1986, |
| "venue": "Communications of ACM", |
| "volume": "", |
| "issue": "10", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/6617.6621" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "M. D. Atkinson, J. R. Sack, N. Santoro, and T. Strothotte, 1986. Min-max heaps and general- ized priority queues. Communications of ACM: 29(10). DOI: https://doi.org/10.1145 /6617.6621", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Neural machine translation by jointly learning to align and translate", |
| "authors": [ |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Bengio. 2015. Neural machine translation by jointly learning to align and translate. In Proceedings of the International Conference on Learning Representations.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Findings of the 2017 conference on machine translation", |
| "authors": [ |
| { |
| "first": "Ond\u0159ej", |
| "middle": [], |
| "last": "Bojar", |
| "suffix": "" |
| }, |
| { |
| "first": "Rajen", |
| "middle": [], |
| "last": "Chatterjee", |
| "suffix": "" |
| }, |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Federmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Yvette", |
| "middle": [], |
| "last": "Graham", |
| "suffix": "" |
| }, |
| { |
| "first": "Barry", |
| "middle": [], |
| "last": "Haddow", |
| "suffix": "" |
| }, |
| { |
| "first": "Shujian", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthias", |
| "middle": [], |
| "last": "Huck", |
| "suffix": "" |
| }, |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Qun", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Varvara", |
| "middle": [], |
| "last": "Logacheva", |
| "suffix": "" |
| }, |
| { |
| "first": "Christof", |
| "middle": [], |
| "last": "Monz", |
| "suffix": "" |
| }, |
| { |
| "first": "Matteo", |
| "middle": [], |
| "last": "Negri", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Post", |
| "suffix": "" |
| }, |
| { |
| "first": "Raphael", |
| "middle": [], |
| "last": "Rubino", |
| "suffix": "" |
| }, |
| { |
| "first": "Lucia", |
| "middle": [], |
| "last": "Specia", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Turchi", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the Conference on Machine Translation", |
| "volume": "2", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W17-4717" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ond\u0159ej Bojar, Rajen Chatterjee, Christian Federmann, Yvette Graham, Barry Haddow, Shujian Huang, Matthias Huck, Philipp Koehn, Qun Liu, Varvara Logacheva, Christof Monz, Matteo Negri, Matt Post, Raphael Rubino, Lucia Specia, and Marco Turchi. 2017. Findings of the 2017 conference on machine translation. In Proceedings of the Conference on Machine Translation, Volume 2: Shared Task Papers, Copenhagen, Denmark. DOI: https://doi.org/10.18653/v1/W17-4717", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Wit 3 : Web inventory of transcribed and translated talks", |
| "authors": [ |
| { |
| "first": "Mauro", |
| "middle": [], |
| "last": "Cettolo", |
| "suffix": "" |
| }, |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Girardi", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcello", |
| "middle": [], |
| "last": "Federico", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the Conference of the European Association for Machine Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mauro Cettolo, Christian Girardi, and Marcello Federico. 2012. Wit 3 : Web inventory of transcribed and translated talks. In Proceedings of the Conference of the European Association for Machine Translation.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "A note on two problems in connexion with graphs", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Edsger", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Dijkstra", |
| "suffix": "" |
| } |
| ], |
| "year": 1959, |
| "venue": "Numerische Mathematik", |
| "volume": "1", |
| "issue": "1", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1007/BF01386390" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Edsger W. Dijkstra. 1959. A note on two problems in connexion with graphs. Numerische Mathe- matik 1(1). DOI: https://doi.org/10.1007 /BF01386390", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "The multitarget TED talks task", |
| "authors": [ |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Duh", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kevin Duh. 2018. The multitarget TED talks task. http://www.cs.jhu.edu/\u223ckevinduh /a/multitarget-tedtalks/", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Understanding backtranslation at scale", |
| "authors": [ |
| { |
| "first": "Sergey", |
| "middle": [], |
| "last": "Edunov", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Auli", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Grangier", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing. Brussels, Belgium, Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D18-1045" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sergey Edunov, Myle Ott, Michael Auli, and David Grangier. 2018. Understanding back- translation at scale. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing. Brussels, Belgium, As- sociation for Computational Linguistics. DOI: https://doi.org/10.18653/v1/D18 -1045", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "A convolutional encoder model for neural machine translation", |
| "authors": [ |
| { |
| "first": "Jonas", |
| "middle": [], |
| "last": "Gehring", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Auli", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Grangier", |
| "suffix": "" |
| }, |
| { |
| "first": "Yann", |
| "middle": [], |
| "last": "Dauphin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P17-1012" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jonas Gehring, Michael Auli, David Grangier, and Yann Dauphin. 2017. A convolutional encoder model for neural machine translation. In Pro- ceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). Vancouver, Canada. Association for Computational Linguistics. DOI: https://doi.org/10.18653/v1", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "A formal basis for the heuristic determination of minimum cost paths", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [ |
| "E" |
| ], |
| "last": "Hart", |
| "suffix": "" |
| }, |
| { |
| "first": "Nils", |
| "middle": [ |
| "J" |
| ], |
| "last": "Nilsson", |
| "suffix": "" |
| }, |
| { |
| "first": "Bertram", |
| "middle": [], |
| "last": "Raphael", |
| "suffix": "" |
| } |
| ], |
| "year": 1968, |
| "venue": "IEEE Transactions on Systems Science and Cybernetics", |
| "volume": "4", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/TSSC.1968.300136" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter E. Hart, Nils J. Nilsson, and Bertram Raphael. 1968. A formal basis for the heuristic determination of minimum cost paths. IEEE Transactions on Systems Science and Cyber- netics, 4(2). DOI: https://doi.org/10 .1109/TSSC.1968.300136", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Improved neural machine translation with SMT features", |
| "authors": [ |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhongjun", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Hua", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Haifeng", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wei He, Zhongjun He, Hua Wu, and Haifeng Wang. 2016. Improved neural machine transla- tion with SMT features. In Proceedings of the AAAI Conference on Artificial Intelligence.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Teaching machines to read and comprehend", |
| "authors": [ |
| { |
| "first": "Karl", |
| "middle": [], |
| "last": "Moritz Hermann", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Kocisky", |
| "suffix": "" |
| }, |
| { |
| "first": "Edward", |
| "middle": [], |
| "last": "Grefenstette", |
| "suffix": "" |
| }, |
| { |
| "first": "Lasse", |
| "middle": [], |
| "last": "Espeholt", |
| "suffix": "" |
| }, |
| { |
| "first": "Will", |
| "middle": [], |
| "last": "Kay", |
| "suffix": "" |
| }, |
| { |
| "first": "Mustafa", |
| "middle": [], |
| "last": "Suleyman", |
| "suffix": "" |
| }, |
| { |
| "first": "Phil", |
| "middle": [], |
| "last": "Blunsom", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Karl Moritz Hermann, Tomas Kocisky, Edward Grefenstette, Lasse Espeholt, Will Kay, Mustafa Suleyman, and Phil Blunsom. 2015. Teaching machines to read and comprehend, Advances in Neural Information Processing Systems.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "When to finish? Optimal beam search for neural text generation (modulo beam size)", |
| "authors": [ |
| { |
| "first": "Liang", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Mingbo", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D17-1227" |
| ], |
| "PMID": [ |
| "28564569" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Liang Huang, Kai Zhao, and Mingbo Ma. 2017. When to finish? Optimal beam search for neural text generation (modulo beam size). In Proceed- ings of the 2017 Conference on Empirical Methods in Natural Language Processing, Copenhagen, Denmark. Association for Com- putational Linguistics. DOI: https://doi .org/10.18653/v1/D17-1227, PMID: 28564569", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Iterative Viterbi A* algorithm for k-best sequential decoding", |
| "authors": [ |
| { |
| "first": "Zhiheng", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yi", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Long", |
| "suffix": "" |
| }, |
| { |
| "first": "Jean-Francois", |
| "middle": [], |
| "last": "Crespo", |
| "suffix": "" |
| }, |
| { |
| "first": "Anlei", |
| "middle": [], |
| "last": "Dong", |
| "suffix": "" |
| }, |
| { |
| "first": "Sathiya", |
| "middle": [], |
| "last": "Keerthi", |
| "suffix": "" |
| }, |
| { |
| "first": "Su-Lin", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhiheng Huang, Yi Chang, Bo Long, Jean- Francois Crespo, Anlei Dong, Sathiya Keerthi, and Su-Lin Wu. 2012. Iterative Viterbi A* algorithm for k-best sequential decoding. In Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), Jeju Island, Korea. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "A* parsing: Fast exact Viterbi parse selection", |
| "authors": [ |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Christopher", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of the 2003 Human Language Technology Conference of the North American Chapter of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/1073445.1073461" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dan Klein and Christopher D. Manning. 2003. A* parsing: Fast exact Viterbi parse selection. In Proceedings of the 2003 Human Language Technology Conference of the North American Chapter of the Association for Computatio- nal Linguistics. DOI: https://doi.org /10.3115/1073445.1073461", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "OpenNMT: Open-source toolkit for neural machine translation", |
| "authors": [ |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoon", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuntian", |
| "middle": [], |
| "last": "Deng", |
| "suffix": "" |
| }, |
| { |
| "first": "Jean", |
| "middle": [], |
| "last": "Senellart", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Rush", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of ACL 2017, System Demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P17-4012" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Guillaume Klein, Yoon Kim, Yuntian Deng, Jean Senellart, and Alexander Rush. 2017. OpenNMT: Open-source toolkit for neural machine translation. In Proceedings of ACL 2017, System Demonstrations, Vancouver, Canada. Association for Computational Lin- guistics. DOI: https://doi.org/10.18653 /v1/P17-4012", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Six challenges for neural machine translation", |
| "authors": [ |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Rebecca", |
| "middle": [], |
| "last": "Knowles", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the First Workshop on Neural Machine Translation, Vancouver", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W17-3204" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philipp Koehn and Rebecca Knowles. 2017. Six challenges for neural machine translation. In Proceedings of the First Workshop on Neural Machine Translation, Vancouver. Association for Computational Linguistics. DOI: https:// /10.18653/v1/W17-3204", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Conditional random fields: Probabilistic models for segmenting and labeling sequence data", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [ |
| "D" |
| ], |
| "last": "Lafferty", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Mccallum", |
| "suffix": "" |
| }, |
| { |
| "first": "Fernando", |
| "middle": [ |
| "C N" |
| ], |
| "last": "Pereira", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Proceedings of the International Conference on Machine Learning, ICML '01", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John D. Lafferty, Andrew McCallum, and Fernando C. N. Pereira. 2001. Conditional ran- dom fields: Probabilistic models for segment- ing and labeling sequence data. In Proceedings of the International Conference on Machine Learning, ICML '01, San Francisco, CA, USA.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "BART: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension", |
| "authors": [ |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Yinhan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Marjan", |
| "middle": [], |
| "last": "Ghazvininejad", |
| "suffix": "" |
| }, |
| { |
| "first": "Abdelrahman", |
| "middle": [], |
| "last": "Mohamed", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.703" |
| ], |
| "arXiv": [ |
| "arXiv:1910.13461" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. 2019. BART: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. In arXiv:1910. 13461. DOI: https://doi.org/10.18653 /v1/2020.acl-main.703", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "A diversitypromoting objective function for neural conversation models", |
| "authors": [ |
| { |
| "first": "Jiwei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Michel", |
| "middle": [], |
| "last": "Galley", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Brockett", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Bill", |
| "middle": [], |
| "last": "Dolan", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiwei Li, Michel Galley, Chris Brockett, Jianfeng Gao, and Bill Dolan. 2016. A diversity- promoting objective function for neural con- versation models. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, San Diego, California. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "ROUGE: A package for automatic evaluation of summaries", |
| "authors": [ |
| { |
| "first": "Chin-Yew", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Text Summarization Branches Out", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chin-Yew Lin. 2004. ROUGE: A package for automatic evaluation of summaries. In Text Sum- marization Branches Out, Barcelona, Spain. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Maximum entropy Markov models for information extraction and segmentation", |
| "authors": [ |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Mccallum", |
| "suffix": "" |
| }, |
| { |
| "first": "Dayne", |
| "middle": [], |
| "last": "Freitag", |
| "suffix": "" |
| }, |
| { |
| "first": "Fernando", |
| "middle": [ |
| "C N" |
| ], |
| "last": "Pereira", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "Proceedings of the International Conference on Machine Learning, ICML 00", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrew McCallum, Dayne Freitag, and Fernando C. N. Pereira. 2000. Maximum entropy Markov models for information extraction and segmen- tation. In Proceedings of the International Conference on Machine Learning, ICML 00. San Francisco, CA, USA.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Correcting length bias in neural machine translation", |
| "authors": [ |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Murray", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Chiang", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kenton Murray and David Chiang. 2018. Correct- ing length bias in neural machine translation.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Association for Computational Linguistics", |
| "authors": [], |
| "year": null, |
| "venue": "Proceedings of the Third Conference on Machine Translation: Research Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W18-6322" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "In Proceedings of the Third Conference on Machine Translation: Research Papers, Belgium, Brussels. Association for Computational Lin- guistics. DOI: https://doi.org/10.18653 /v1/W18-6322", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Parsing the SynTagRus treebank of Russian", |
| "authors": [ |
| { |
| "first": "Joakim", |
| "middle": [], |
| "last": "Nivre", |
| "suffix": "" |
| }, |
| { |
| "first": "Igor", |
| "middle": [ |
| "M" |
| ], |
| "last": "Boguslavsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Leonid", |
| "middle": [ |
| "L" |
| ], |
| "last": "Iomdin", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of the 22nd International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/1599081.1599162" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joakim Nivre, Igor M. Boguslavsky, and Leonid L. Iomdin. 2008. Parsing the SynTagRus treebank of Russian. In Proceedings of the 22nd Interna- tional Conference on Computational Linguis- tics (Coling 2008), Manchester, UK. Coling 2008 Organizing Committee. DOI: https:// doi.org/10.3115/1599081.1599162", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "fairseq: A fast, extensible toolkit for sequence modeling", |
| "authors": [ |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Sergey", |
| "middle": [], |
| "last": "Edunov", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexei", |
| "middle": [], |
| "last": "Baevski", |
| "suffix": "" |
| }, |
| { |
| "first": "Angela", |
| "middle": [], |
| "last": "Fan", |
| "suffix": "" |
| }, |
| { |
| "first": "Sam", |
| "middle": [], |
| "last": "Gross", |
| "suffix": "" |
| }, |
| { |
| "first": "Nathan", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Grangier", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Auli", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of NAACL-HLT: Demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-4009" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Myle Ott, Sergey Edunov, Alexei Baevski, Angela Fan, Sam Gross, Nathan Ng, David Grangier, and Michael Auli. 2019. fairseq: A fast, exten- sible toolkit for sequence modeling. In Pro- ceedings of NAACL-HLT: Demonstrations. DOI: https://doi.org/10.18653/v1 /N19-4009", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "BLEU: A method for automatic evaluation of machine translation", |
| "authors": [ |
| { |
| "first": "Kishore", |
| "middle": [], |
| "last": "Papineni", |
| "suffix": "" |
| }, |
| { |
| "first": "Salim", |
| "middle": [], |
| "last": "Roukos", |
| "suffix": "" |
| }, |
| { |
| "first": "Todd", |
| "middle": [], |
| "last": "Ward", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei-Jing", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the Annual Meeting on Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/1073083.1073135" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002. BLEU: A method for automatic evaluation of machine translation. In Proceedings of the Annual Meeting on Asso- ciation for Computational Linguistics. DOI: https://doi.org/10.3115/1073083 .1073135", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Association for Computational Linguistics", |
| "authors": [ |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Pauls", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of Human Language Technologies: The 2009 Annual Conference of the North American Chapter of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/1620754.1620835" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adam Pauls and Dan Klein. 2009. Hierarchical search for parsing. In Proceedings of Human Language Technologies: The 2009 Annual Con- ference of the North American Chapter of the Association for Computational Linguistics, Boulder, Colorado. Association for Computa- tional Linguistics. DOI: https://doi.org /10.3115/1620754.1620835", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "A call for clarity in reporting BLEU scores", |
| "authors": [ |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Post", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Conference on Machine Translation: Research Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W18-6319" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matt Post. 2018. A call for clarity in reporting BLEU scores. In Proceedings of the Confer- ence on Machine Translation: Research Pa- pers. DOI: https://doi.org/10.18653 /v1/W18-6319", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Neural machine translation of rare words with subword units", |
| "authors": [ |
| { |
| "first": "Rico", |
| "middle": [], |
| "last": "Sennrich", |
| "suffix": "" |
| }, |
| { |
| "first": "Barry", |
| "middle": [], |
| "last": "Haddow", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Birch", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P16-1162" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch.2016. Neural machine translation of rare words with subword units. In Proceedings of the Annual Meeting of the Association for Com- putational Linguistics. DOI: https://doi .org/10.18653/v1/P16-1162", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Multiresolution recurrent neural networks: An application to dialogue response generation", |
| "authors": [ |
| { |
| "first": "Iulian", |
| "middle": [], |
| "last": "Serban", |
| "suffix": "" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Klinger", |
| "suffix": "" |
| }, |
| { |
| "first": "Gerald", |
| "middle": [], |
| "last": "Tesauro", |
| "suffix": "" |
| }, |
| { |
| "first": "Kartik", |
| "middle": [], |
| "last": "Talamadupula", |
| "suffix": "" |
| }, |
| { |
| "first": "Bowen", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "Aaron", |
| "middle": [], |
| "last": "Courville", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Iulian Serban, Tim Klinger, Gerald Tesauro, Kartik Talamadupula, Bowen Zhou, Yoshua Bengio, and Aaron Courville. 2017. Multireso- lution recurrent neural networks: An application to dialogue response generation.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Minimum risk training for neural machine translation", |
| "authors": [ |
| { |
| "first": "Shiqi", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Yong", |
| "middle": [], |
| "last": "Cheng", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhongjun", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Hua", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Maosong", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P16-1159" |
| ], |
| "PMID": [ |
| "27069146" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shiqi Shen, Yong Cheng, Zhongjun He, Wei He, Hua Wu, Maosong Sun, and Yang Liu. 2016. Minimum risk training for neural machine translation. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), Berlin, Germany. Association for Computational Lin- guistics. DOI: https://doi.org/10.18v653 /v1/P16-1159, PMID: 27069146", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Improving beam search by removing monotonic constraint for neural machine translation", |
| "authors": [ |
| { |
| "first": "Raphael", |
| "middle": [], |
| "last": "Shu", |
| "suffix": "" |
| }, |
| { |
| "first": "Hideki", |
| "middle": [], |
| "last": "Nakayama", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Raphael Shu and Hideki Nakayama. 2018. Im- proving beam search by removing monotonic constraint for neural machine translation. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguis- tics (Volume 2: Short Papers), Melbourne, Australia. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "On NMT search errors and model errors: Cat got your tongue?", |
| "authors": [ |
| { |
| "first": "Felix", |
| "middle": [], |
| "last": "Stahlberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Bill", |
| "middle": [], |
| "last": "Byrne", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D19-1331" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Felix Stahlberg and Bill Byrne. 2019. On NMT search errors and model errors: Cat got your tongue? In Proceedings of the Conference on Empirical Methods in Natural Language Pro- cessing and the 9th International Joint Con- ference on Natural Language Processing (EMNLP-IJCNLP). Hong Kong, China. DOI: https://doi.org/10.18653/v1/D19 -1331", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Sequence to sequence learning with neural networks", |
| "authors": [ |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Quoc", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ilya Sutskever, Oriol Vinyals, and Quoc V. Le. 2014, Sequence to sequence learning with neu- ral networks. In Advances in Neural Inform- ation Processing Systems.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Max-margin markov networks", |
| "authors": [ |
| { |
| "first": "Ben", |
| "middle": [], |
| "last": "Taskar", |
| "suffix": "" |
| }, |
| { |
| "first": "Carlos", |
| "middle": [], |
| "last": "Guestrin", |
| "suffix": "" |
| }, |
| { |
| "first": "Daphne", |
| "middle": [], |
| "last": "Koller", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ben Taskar, Carlos Guestrin, and Daphne Koller. 2004. Max-margin markov networks. In Ad- vances in Neural Information Processing Systems.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Tensor2tensor for neural machine translation", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Samy", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "Eugene", |
| "middle": [], |
| "last": "Brevdo", |
| "suffix": "" |
| }, |
| { |
| "first": "Francois", |
| "middle": [], |
| "last": "Chollet", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephan", |
| "middle": [], |
| "last": "Gouws", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "\u0141ukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Nal", |
| "middle": [], |
| "last": "Kalchbrenner", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Sepassi", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Samy Bengio, Eugene Brevdo, Francois Chollet, Aidan N. Gomez, Stephan Gouws, Llion Jones, \u0141ukasz Kaiser, Nal Kalchbrenner, Niki Parmar, Ryan Sepassi, Noam Shazeer, and Jakob Uszkoreit. 2018. Tensor2tensor for neural machine translation. CoRR.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Attention is all you need", |
| "authors": [], |
| "year": null, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Attention is all you need. In Advances in Neural Information Processing Systems.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Speed-accuracy tradeoffs in tagging with variable-order CRFs and structured sparsity", |
| "authors": [ |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Vieira", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Cotterell", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Eisner", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D16-1206" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tim Vieira, Ryan Cotterell, and Jason Eisner. 2016. Speed-accuracy tradeoffs in tagging with variable-order CRFs and structured sparsity. In Proceedings of the Conference on Empiri- cal Methods in Natural Language Processing. DOI: https://doi.org/10.18653/v1 /D16-1206", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Diverse beam search: Decoding diverse solutions from neural sequence models", |
| "authors": [ |
| { |
| "first": "Ashwin", |
| "middle": [ |
| "K" |
| ], |
| "last": "Vijayakumar", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Cogswell", |
| "suffix": "" |
| }, |
| { |
| "first": "Ramprasaath", |
| "middle": [ |
| "R" |
| ], |
| "last": "Selvaraju", |
| "suffix": "" |
| }, |
| { |
| "first": "Qing", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Stefan", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "J" |
| ], |
| "last": "Crandall", |
| "suffix": "" |
| }, |
| { |
| "first": "Dhruv", |
| "middle": [], |
| "last": "Batra", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashwin K. Vijayakumar, Michael Cogswell, Ramprasaath R. Selvaraju, Qing Sun, Stefan Lee, David J. Crandall, and Dhruv Batra. 2016. Diverse beam search: Decoding diverse solutions from neural sequence models. CoRR, abs/1610.02424.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "A neural conversational model", |
| "authors": [ |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Quoc", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/TIT.1967.1054010" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oriol Vinyals and Quoc V. Le. 2015. A neural conversational model. DOI: https://doi .org/10.1109/TIT.1967.1054010", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "Error bounds for convolutional codes and an asymptotically optimum decoding algorithm", |
| "authors": [ |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Viterbi", |
| "suffix": "" |
| } |
| ], |
| "year": 1967, |
| "venue": "IEEE Transactions on Information Theory", |
| "volume": "13", |
| "issue": "2", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrew Viterbi. 1967. Error bounds for convolu- tional codes and an asymptotically optimum decoding algorithm. IEEE Transactions on Information Theory, 13(2).", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "Google's neural machine translation system: Bridging the gap between human and machine translation", |
| "authors": [ |
| { |
| "first": "Stephan", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshikiyo", |
| "middle": [], |
| "last": "Gouws", |
| "suffix": "" |
| }, |
| { |
| "first": "Taku", |
| "middle": [], |
| "last": "Kato", |
| "suffix": "" |
| }, |
| { |
| "first": "Hideto", |
| "middle": [], |
| "last": "Kudo", |
| "suffix": "" |
| }, |
| { |
| "first": "Keith", |
| "middle": [], |
| "last": "Kazawa", |
| "suffix": "" |
| }, |
| { |
| "first": "George", |
| "middle": [], |
| "last": "Stevens", |
| "suffix": "" |
| }, |
| { |
| "first": "Nishant", |
| "middle": [], |
| "last": "Kurian", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Patil", |
| "suffix": "" |
| }, |
| { |
| "first": ";", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Gregory", |
| "suffix": "" |
| }, |
| { |
| "first": "Macduff", |
| "middle": [], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Hughes", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kaiser, Stephan Gouws, Yoshikiyo Kato, Taku Kudo, Hideto Kazawa, Keith Stevens, George Kurian, Nishant Patil, Wei Wang, Cliff Young, Jason Smith, Jason Riesa, Alex Rudnick, Oriol Vinyals, Gregory S. Corrado, Macduff Hughes, and Jeffrey Dean. 2016. Google's neural machine translation system: Bridging the gap between human and machine translation.", |
| "links": null |
| }, |
| "BIBREF45": { |
| "ref_id": "b45", |
| "title": "Breaking the beam search curse: A study of (re-)scoring methods and stopping criteria for neural machine translation", |
| "authors": [ |
| { |
| "first": "Yilin", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Liang", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Mingbo", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D18-1342" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yilin Yang, Liang Huang, and Mingbo Ma. 2018. Breaking the beam search curse: A study of (re-)scoring methods and stopping criteria for neural machine translation. In Pro- ceedings of the 2018 Conference on Empiri- cal Methods in Natural Language Processing, Brussels, Belgium. Association for Computa- tional Linguistics. DOI: https://doi.org /10.18653/v1/D18-1342", |
| "links": null |
| }, |
| "BIBREF46": { |
| "ref_id": "b46", |
| "title": "XLNet: Generalized autoregressive pretraining for language understanding", |
| "authors": [ |
| { |
| "first": "Zhilin", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zihang", |
| "middle": [], |
| "last": "Dai", |
| "suffix": "" |
| }, |
| { |
| "first": "Yiming", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaime", |
| "middle": [], |
| "last": "Carbonell", |
| "suffix": "" |
| }, |
| { |
| "first": "Russ", |
| "middle": [ |
| "R" |
| ], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Quoc", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Carbonell, Russ R. Salakhutdinov, and Quoc V. Le. 2019. XLNet: Generalized autoregressive pretraining for language understanding. In Advances in Neural Information Processing Systems.", |
| "links": null |
| }, |
| "BIBREF47": { |
| "ref_id": "b47", |
| "title": "Beamstack search: Integrating backtracking with beam search", |
| "authors": [ |
| { |
| "first": "Rong", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [ |
| "A" |
| ], |
| "last": "Hansen", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the International Conference on International Conference on Automated Planning and Scheduling, ICAPS05", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rong Zhou and Eric A. Hansen. 2005. Beam- stack search: Integrating backtracking with beam search. In Proceedings of the International Con- ference on International Conference on Auto- mated Planning and Scheduling, ICAPS05.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "Number of calls to scoring function score vs. total sequence generation time. Each point is a decoded sequence. Colors represent different model architectures and shapes signify the decoding algorithm used (beam sizes 3 and 10 are included for each). There is no notable difference in the overhead (time-wise) of best-first beam search and beam search.", |
| "uris": null, |
| "type_str": "figure", |
| "num": null |
| }, |
| "TABREF2": { |
| "num": null, |
| "content": "<table><tr><td>2</td><td>stop(Q) \u21d0\u21d2</td><td>stop(Q) \u21d0\u21d2</td><td>stop(Q) \u21d0\u21d2</td></tr><tr><td/><td>y.last() = EOS \u2200y \u2208 Q</td><td>Q.peek().last() = EOS</td><td>Q.peek().last() = EOS</td></tr><tr><td>3</td><td>k = beam size</td><td>k = beam size</td><td>k = beam size</td></tr><tr><td>4</td><td>0</td><td>0</td><td>any admissible heuristic</td></tr></table>", |
| "type_str": "table", |
| "text": "s h or (|y| = |y| and s h \u2265 s h ) or (s h = s h and |y| < |y| ) or (s h = s h and |y| < |y| )", |
| "html": null |
| }, |
| "TABREF3": { |
| "num": null, |
| "content": "<table><tr><td>2</td><td>stop(Q) \u21d0\u21d2</td><td>stop(Q) \u21d0\u21d2</td><td>stop(Q) \u21d0\u21d2</td></tr><tr><td/><td>y.last() = EOS \u2200y \u2208 Q</td><td>Q.peek().last() = EOS</td><td>Q.peek().last() = EOS</td></tr><tr><td>3</td><td>k = \u221e</td><td>k = \u221e</td><td>k = \u221e</td></tr><tr><td>4</td><td>0</td><td>0</td><td>any admissible heuristic</td></tr></table>", |
| "type_str": "table", |
| "text": "s h or (|y| = |y| and s h \u2265 s h ) or (s h = s h and |y| < |y| ) or (s h = s h and |y| < |y| )", |
| "html": null |
| }, |
| "TABREF4": { |
| "num": null, |
| "content": "<table/>", |
| "type_str": "table", |
| "text": "Values at choice points for various search algorithms. Note that any admissible heuristic may be used for variants of A", |
| "html": null |
| }, |
| "TABREF5": { |
| "num": null, |
| "content": "<table><tr><td/><td colspan=\"2\">IWSLT'14 De-En</td><td/><td/><td colspan=\"2\">MTTT Fr-En</td><td/><td colspan=\"2\">CNN-DailyMail</td></tr><tr><td>k =5</td><td colspan=\"3\">k =10 k = 100 k = 500</td><td colspan=\"2\">k =10 k = 100</td><td>k = 500</td><td>k =5</td><td colspan=\"2\">k =10 k = 100</td></tr><tr><td>(35.6)</td><td>(35.4)</td><td>(34.7)</td><td>(7.9)</td><td>( 33.0)</td><td>(9.9)</td><td>( 1.2)</td><td>( 31.5)</td><td>(30.9)</td><td>(29.1)</td></tr><tr><td>BF beam search 93 (Beam search 115</td><td>229</td><td>2286</td><td>9770</td><td>214</td><td>2066</td><td>8281</td><td>266</td><td>435</td><td>5673</td></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td colspan=\"2\">for empirical</td></tr><tr><td/><td/><td/><td/><td colspan=\"6\">justification of the correlation between calls to the</td></tr><tr><td/><td/><td/><td/><td colspan=\"6\">scoring function and runtime on the hardware our</td></tr></table>", |
| "type_str": "table", |
| "text": "24%) 169 (36%) 1275 (79%) 1168 (736%) 184 (16%) 867 (138%) 885 (836%) 200 (33%) 305 (43%) 2960 (92%) Beam search (ES) 107 (7%) 210 (9%) 2047 (12%) 7685 (27%) 196 (9%) 1310 (58%) 4182 (98%) 224 (19%) 357 (22%) 3942 (59%)", |
| "html": null |
| }, |
| "TABREF6": { |
| "num": null, |
| "content": "<table><tr><td/><td/><td colspan=\"2\">IWSLT'14 De-En</td><td/></tr><tr><td>k</td><td>method</td><td colspan=\"2\">search error BLEU</td><td># calls</td></tr><tr><td/><td>shrinking</td><td>0%</td><td>35.4</td><td>229 (0%)</td></tr><tr><td>10</td><td>early</td><td>0%</td><td>35.4</td><td>225 (2%)</td></tr><tr><td/><td>BF BS</td><td>\u2212</td><td>35.4</td><td>169 (36%)</td></tr><tr><td/><td>shrinking</td><td>31.7%</td><td colspan=\"2\">13.2 2278 (0%)</td></tr><tr><td>100</td><td>early</td><td>31.7%</td><td colspan=\"2\">13.2 1738 (31%)</td></tr><tr><td/><td>BF BS</td><td>\u2212</td><td colspan=\"2\">34.7 1275 (79%)</td></tr><tr><td/><td/><td colspan=\"2\">WMT'17 De-En</td><td/></tr><tr><td/><td>shrinking</td><td>0%</td><td>28.6</td><td>260 (0%)</td></tr><tr><td>10</td><td>early</td><td>0%</td><td>28.6</td><td>252 (3%)</td></tr><tr><td/><td>BF BS</td><td>\u2212</td><td>28.6</td><td>230 (12%)</td></tr><tr><td/><td>shrinking</td><td>1.7%</td><td colspan=\"2\">26.4 2587 (0%)</td></tr><tr><td>100</td><td>early</td><td>1.7%</td><td colspan=\"2\">26.4 2402 (8%)</td></tr><tr><td/><td>BF BS</td><td>\u2212</td><td colspan=\"2\">26.9 2046 (26%)</td></tr></table>", |
| "type_str": "table", |
| "text": "12 Performance increase is defined as (old \u2212 new)/new.", |
| "html": null |
| }, |
| "TABREF7": { |
| "num": null, |
| "content": "<table/>", |
| "type_str": "table", |
| "text": "BLEU, search error, and average number of calls to score for different stopping criterion. ''shrinking'' refers to the shrinking beam method of", |
| "html": null |
| }, |
| "TABREF9": { |
| "num": null, |
| "content": "<table><tr><td>: BLEU search error, and average number</td></tr><tr><td>of calls to score for output obtained with length</td></tr><tr><td>normalization scoring function on the IWSLT'14</td></tr><tr><td>De-En and MTTT Fr-En test sets. Increase in BLEU</td></tr><tr><td>is over baseline with no length normalization.</td></tr><tr><td>Search error and performance increases are with</td></tr><tr><td>respect to standard beam search decoding using</td></tr><tr><td>the same scoring function.</td></tr></table>", |
| "type_str": "table", |
| "text": "", |
| "html": null |
| }, |
| "TABREF10": { |
| "num": null, |
| "content": "<table><tr><td/><td>k \u03b5</td><td colspan=\"3\">\u03b2 # calls search BLEU</td></tr><tr><td/><td/><td/><td>error</td><td/></tr><tr><td>Baseline</td><td>5 \u2212 10 \u2212</td><td>.05 115 .05 229</td><td>\u2212 \u2212</td><td>33.2 33.0</td></tr></table>", |
| "type_str": "table", |
| "text": "Heuristic 5 .02 .05 129 (0%) 42.7% 33.2 10 .02 .05 256 (0%) 42.7% 33.0 Stopping Criterion 5 3e-4 .05 114 (1%) 29.2% 33.2 10 5e-5 .05 224 (2%) 26.6% 33.0", |
| "html": null |
| }, |
| "TABREF11": { |
| "num": null, |
| "content": "<table><tr><td/><td/><td colspan=\"2\">IWSLT'14 De-En</td><td/></tr><tr><td>k</td><td>\u03b3</td><td>search</td><td>BLEU</td><td># calls</td></tr><tr><td/><td/><td/><td>error</td><td/></tr><tr><td/><td>2</td><td>22.7%</td><td>35.7 +0.1</td><td>43.8 (163%)</td></tr><tr><td>5</td><td>5</td><td>4.4%</td><td>35.8 +0.2</td><td>79.8 (44%)</td></tr><tr><td/><td>n max</td><td>\u2212</td><td>35.6</td><td>93.0 (24%)</td></tr><tr><td/><td>2</td><td>22.6%</td><td>35.7 +0.3</td><td>48.4 (374%)</td></tr><tr><td>10</td><td>5</td><td>4.5%</td><td colspan=\"2\">35.6 +0.2 126.9 (81%)</td></tr><tr><td/><td>n max</td><td>\u2212</td><td>35.4</td><td>169.0 (36%)</td></tr><tr><td/><td/><td colspan=\"2\">WMT'17 De-En</td><td/></tr><tr><td/><td>2</td><td>29.0%</td><td>29.7 +0.2</td><td>77.5 (75%)</td></tr><tr><td>5</td><td>5</td><td>1.2%</td><td colspan=\"2\">29.5 +0.0 115.8 (12%)</td></tr><tr><td/><td>n max</td><td>\u2212</td><td>29.5</td><td>118.8 (10%)</td></tr><tr><td/><td>2</td><td>36.6%</td><td>29.5 +0.2</td><td>97.3 (165%)</td></tr><tr><td>10</td><td>5</td><td>2.6%</td><td colspan=\"2\">29.3 +0.0 230.0 (12%)</td></tr><tr><td/><td>n max</td><td>\u2212</td><td>29.3</td><td>230.2 (12%)</td></tr></table>", |
| "type_str": "table", |
| "text": "BLEU scores with mutual information scoring function on IWSLT'14 De-En. Baseline is PMI decoding with unbounded p(y), that is, \u03b5 = 0. Search error is with respect to beam search decoding of baseline with same \u03b2.", |
| "html": null |
| }, |
| "TABREF12": { |
| "num": null, |
| "content": "<table/>", |
| "type_str": "table", |
| "text": "BLEU scores and the number of calls to score on the IWSLT'14 De-En validation set and WMT'17 De-En test set with queue size restricted to n max \u2022 k.", |
| "html": null |
| } |
| } |
| } |
| } |