| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T07:53:21.411673Z" |
| }, |
| "title": "Consistent Transcription and Translation of Speech", |
| "authors": [ |
| { |
| "first": "Matthias", |
| "middle": [], |
| "last": "Sperber", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "sperber@apple.com" |
| }, |
| { |
| "first": "Hendra", |
| "middle": [], |
| "last": "Setiawan", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "hendra@apple.com" |
| }, |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Gollan", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "cgollan@apple.com" |
| }, |
| { |
| "first": "Udhyakumar", |
| "middle": [], |
| "last": "Nallasamy", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Matthias", |
| "middle": [], |
| "last": "Paulik", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "mpaulik@apple.com" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "The conventional paradigm in speech translation starts with a speech recognition step to generate transcripts, followed by a translation step with the automatic transcripts as input. To address various shortcomings of this paradigm, recent work explores end-to-end trainable direct models that translate without transcribing. However, transcripts can be an indispensable output in practical applications, which often display transcripts alongside the translations to users. We make this common requirement explicit and explore the task of jointly transcribing and translating speech. Although high accuracy of transcript and translation are crucial, even highly accurate systems can suffer from inconsistencies between both outputs that degrade the user experience. We introduce a methodology to evaluate consistency and compare several modeling approaches, including the traditional cascaded approach and end-to-end models. We find that direct models are poorly suited to the joint transcription/translation task, but that end-to-end models that feature a coupled inference procedure are able to achieve strong consistency. We further introduce simple techniques for directly optimizing for consistency, and analyze the resulting trade-offs between consistency, transcription accuracy, and translation accuracy. 1", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "The conventional paradigm in speech translation starts with a speech recognition step to generate transcripts, followed by a translation step with the automatic transcripts as input. To address various shortcomings of this paradigm, recent work explores end-to-end trainable direct models that translate without transcribing. However, transcripts can be an indispensable output in practical applications, which often display transcripts alongside the translations to users. We make this common requirement explicit and explore the task of jointly transcribing and translating speech. Although high accuracy of transcript and translation are crucial, even highly accurate systems can suffer from inconsistencies between both outputs that degrade the user experience. We introduce a methodology to evaluate consistency and compare several modeling approaches, including the traditional cascaded approach and end-to-end models. We find that direct models are poorly suited to the joint transcription/translation task, but that end-to-end models that feature a coupled inference procedure are able to achieve strong consistency. We further introduce simple techniques for directly optimizing for consistency, and analyze the resulting trade-offs between consistency, transcription accuracy, and translation accuracy. 1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Speech translation (ST) is the task of translating acoustic speech signals into text in a foreign language. According to the prevalent framing of ST (e.g., Ney, 1999) , given some input speech 1 We release human annotations of consistency under https://github.com/apple/ml-transcript-translation-consistency-ratings.", |
| "cite_spans": [ |
| { |
| "start": 156, |
| "end": 166, |
| "text": "Ney, 1999)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 193, |
| "end": 194, |
| "text": "1", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "x, ST seeks an optimal translation t \u2208 T , while possibly marginalizing over transcripts s \u2208 S:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "t = argmax t \u2208 T {P (t | x)}", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "(1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2248 argmax t \u2208 T \u2211 s \u2208 S P MT (t | s) P ASR (s | x) .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "According to this formulation, ST models primarily focus on translation quality, while transcription receives less emphasis. In contrast, practical ST user interfaces often display transcripts to the user alongside the translations. A typical example is a two-way conversational ST application that displays the transcript to the speaker for verification, and the translation to the conversation partner (Hsiao et al., 2006) . Therefore, there is a mismatch between this practical requirement and the prevalent framing as described above.", |
| "cite_spans": [ |
| { |
| "start": 404, |
| "end": 424, |
| "text": "(Hsiao et al., 2006)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "While traditional ST models often do commit to a single automatic speech recognition (ASR) transcript that is then passed on to a machine translation (MT) component (Stentiford and Steer, 1988; Waibel et al., 1991) , researchers have undertaken much effort to mitigate resulting error propagation issues by developing models that avoid making decisions on transcripts. Recent examples include direct models (Weiss et al., 2017) that bypass transcript generation, and lattice-to-sequence models (Sperber et al., 2017) that translate the ASR search space as a whole. Despite their merits, such models may not be ideal for scenarios that display both a translation and a corresponding transcript to users.", |
| "cite_spans": [ |
| { |
| "start": 165, |
| "end": 193, |
| "text": "(Stentiford and Steer, 1988;", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 194, |
| "end": 214, |
| "text": "Waibel et al., 1991)", |
| "ref_id": "BIBREF41" |
| }, |
| { |
| "start": 407, |
| "end": 427, |
| "text": "(Weiss et al., 2017)", |
| "ref_id": "BIBREF42" |
| }, |
| { |
| "start": 494, |
| "end": 516, |
| "text": "(Sperber et al., 2017)", |
| "ref_id": "BIBREF36" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we replace Eq. 1 by a joint transcription/translation objective to reflect this requirement:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "s,t = argmax s \u2208 S,t \u2208 T {P (s, t | x)} .", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "This change in perspective has significant implications not only on model design but also Figure 1: Example of lexical inconsistencies we encountered when generating transcript and translation independently. Although the transcript correctly contains replay, the German translation (mistakenly) chooses ersetzen (English: replace). The inconsistency is explained by the acoustic similarity between replay and replace, which is not obvious to a monolingual user.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Figure 2: Illustration of surface-level consistency between English transcript and German translation. Only translation 1 spells both named entities (Bill Gross and eSolar) consistently, and the German translation Solarthermaltechnologie (translation 1) is preferred over Solarw\u00e4rme-Technologie (translation 2), by itself a correct choice but less similar on the surface level. on evaluation. First, besides translation accuracy, transcription accuracy becomes relevant and equally important. Second, the issue of consistency between transcript and translation becomes essential. For example, let us consider a naive approach of transcribing and translating with two completely independent, potentially erroneous models. These independent models would expectedly produce inconsistencies, including inconsistent lexical choice caused by acoustic or linguistic ambiguity (Figure 1) , and inconsistent spelling of named entities ( Figure 2 ). Even if output quality is high on average, such inconsistencies may considerably degrade the user experience.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 869, |
| "end": 879, |
| "text": "(Figure 1)", |
| "ref_id": null |
| }, |
| { |
| "start": 928, |
| "end": 936, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Our contributions are threefold: First, we introduce the notion of consistency between transcripts and translations and propose methods to assess consistency quantitatively. Second, we survey and extend existing models, and develop novel training and inference schemes, under the hypothesis that both joint model training and a coupled inference procedure are desirable for our goal of accurate and consistent models. Third, we provide a comprehensive analysis, comparing accuracy and consistency for a wide variety of model types across several language pairs to determine the most suitable models for our task and analyze potential trade-offs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "To better understand the desiderata of models that perform transcription and translation, it is helpful to discuss how one should evaluate such models. A first step is to evaluate transcription accuracy and translation accuracy in isolation. For this purpose, we can use well-established evaluation metrics such as word error rate (WER) for transcripts and BLEU (Papineni et al., 2002) for translations. When considering scenarios in which both transcript and translation are displayed, consistency is an essential additional requirement. 2 Let us first clarify what we mean by this term.", |
| "cite_spans": [ |
| { |
| "start": 362, |
| "end": 385, |
| "text": "(Papineni et al., 2002)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation Beyond Accuracy-The Need for Consistency", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Definition: Consistency between transcript and translation is achieved if both are semantically equivalent, with a preference for a faithful translation approach (Newmark, 1988) , meaning that stylistic, lexical, and grammatical characteristics should be transferred whenever fluency is not compromised. Importantly, consistency measures are defined over the space of both well-formed and erroneous sentence pairs. In the case of ungrammatical sentence pairs, consistency may be achieved by adhering to a literal or word-for-word translation strategy.", |
| "cite_spans": [ |
| { |
| "start": 162, |
| "end": 177, |
| "text": "(Newmark, 1988)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation Beyond Accuracy-The Need for Consistency", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Consistency is only loosely related to accuracy, and can even be in opposition in some cases. For instance, when a translation error cannot be avoided, consistency is improved at the cost of transcription accuracy by placing the backtranslated error in the transcript. Because accuracy and error metrics assess transcript or translation quality in isolation, these metrics cannot capture phenomena that involve the interplay between transcript and translation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation Beyond Accuracy-The Need for Consistency", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Although ultimately user studies must assess to what extent consistency improves user satisfaction, our intention in this paper is to provide a universally useful notion of consistency that does not depend too much on specific use cases. Nevertheless, our definition may be most convincing when put in the context of specific example use cases.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Motivational Use Cases", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Lecture Use Case. Here, a person follows a presentation or lecture-like event, presented in a foreign language, by reading transcript and translation on a screen (F\u00fcgen, 2008) . This person may have partial knowledge of the source language, but knows only the target language sufficiently well. She, therefore, pays attention mainly to the translation outputs, but may occasionally consult the transcription output in cases where the translation seems wrong. In this case, quick orientation can be critical, and inconsistencies would cause distraction and undermine trust and perceived transparency of the transcription/ translation service.", |
| "cite_spans": [ |
| { |
| "start": 162, |
| "end": 175, |
| "text": "(F\u00fcgen, 2008)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Motivational Use Cases", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Dialog Use Case. Next, consider the scenario of a dialog between two people who speak different languages. One person, the speaker, attempts to convey a message to the recipient, relying on an ST service that displays a transcript and a translation. Here, the transcript is shown to the speaker, who speaks only the source language, for purposes of verification and possibly correction. The translation is shown to the recipient, who only understands the target language, to convey the message (Hsiao et al., 2006) . We can expect that if transcript and translation are error-free, then the message is conveyed smoothly. However, when the transcript or translation contains errors, miscommunication occurs. To efficiently recover from such miscommunication, both parties should agree on the nature and details of the mistaken content. In other words, occurring errors are preferred to be consistent between transcript and translation.", |
| "cite_spans": [ |
| { |
| "start": 494, |
| "end": 514, |
| "text": "(Hsiao et al., 2006)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Motivational Use Cases", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Having argued for consistency as a desirable property, we now wish to empirically quantify the level of consistency between a particular model's transcripts and translations. To our knowledge, consistency has not been addressed in the context of ST before, perhaps because traditional cascaded models have not been observed to suffer from inconsistencies in the outputs. Therefore, we propose several metrics for estimating transcript/translation consistency in this section. In \u00a77.3, we demonstrate strong agreement of these metrics with human ratings of consistency.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Estimating Consistency", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Our first metric focuses on semantic equivalency in general, and consistent lexical choice in particular, as illustrated in Figure 1 . To this end, we use a simple lexical coverage model based on word-level translation probabilities. This approach might also capture some aspects of grammatical consistency by rewarding the use of comparable function words. We sum negative translation log-probabilities for each utterance:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 124, |
| "end": 132, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Lexical Consistency", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "t t\u2192s = \u2212 t j \u2208t max s i \u2208s log p (t j | s i ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Lexical Consistency", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We then normalize across the test corpus C and average over both translation directions:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Lexical Consistency", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "1 2 1 n (s,t) \u2208 C t t\u2192s + 1 m (s,t) \u2208 C t s\u2192t ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Lexical Consistency", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where n and m denote the number of translated and transcribed words in the corpus, respectively. In practice, we use fast_align (Dyer et al., 2013) to estimate probability tables from our training data. When a word has no translation probability assigned, including out-of-vocabulary cases, we use a simple smoothing method by assigning the lowest score found in the lexicon.", |
| "cite_spans": [ |
| { |
| "start": 128, |
| "end": 147, |
| "text": "(Dyer et al., 2013)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Lexical Consistency", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Although it may seem tempting to use a more elaborate translation model such as an encoder-decoder model, we deliberately choose this simple lexical approach. The main reason is that we need to estimate consistency for potentially erroneous transcript/translation pairs. In such cases, we found severe robustness issues when computing translation scores using a full-fledged encoder-decoder model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Lexical Consistency", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Our consistency definition mentions a preference for a stylistic similarity between transcript and translation. One way of assessing stylistic aspects is to compare transcripts and translations at the surface level. This is most sensible when the source and target language are related, and could help capture phenomena such as consistent spelling of named entities, or translations using words with similar surface form as found in the transcript. Figure 2 provides an illustration.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 449, |
| "end": 457, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Surface Form Consistency", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We propose to assess surface form consistency through substring overlap. Our notion of substring overlap follows CharCut, which was proposed as a metric for reference-based MT evaluation (Lardilleux and Lepage, 2017) . Following Eq. 2 of that paper, we determine substring insertions, deletions, and shifts in the translation, when compared with the transcript, and compute 1 \u2212 deletions+insertions+shifts |s|+|t| . Counts are aggregated and normalized at corpus level. To avoid spurious matches, we match only substrings of at least length n (here: 5), compare in case-sensitive fashion, and deactivate CharCut's special treatment of longest common prefixes/suffixes.", |
| "cite_spans": [ |
| { |
| "start": 187, |
| "end": 216, |
| "text": "(Lardilleux and Lepage, 2017)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Surface Form Consistency", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We note that surface form consistency is less suited to language pairs that use different alphabets, and leave it to future work to explore alternatives, such as the assessment of cross-lingual phonetic similarity in such cases.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Surface Form Consistency", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "This third metric bases consistency on wellestablished accuracy metrics or error metrics. We posit that a necessary (though not sufficient) condition for consistency is that the accuracy of the transcript should be correlated with the accuracy of the translation, where both are measured against some respective gold standard. We therefore propose to assess consistency through computing statistical correlation between utterance-level error metrics for transcript and translation. Specifically, for a test corpus of size N , we compute Kendall's \u03c4 coefficient across utterancelevel error metrics. On the transcript side, we use utterance-level WER as the error metric. Because BLEU is a poor utterance-level metric, we make use of CharCut on the translation side, which has been shown to correlate well with human judgment at utterance level (Lardilleux and Lepage, 2017) . Formally, we compute:", |
| "cite_spans": [ |
| { |
| "start": 843, |
| "end": 872, |
| "text": "(Lardilleux and Lepage, 2017)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Correlation of Transcription/Translation Error", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "kendall \u03c4 WER clipped 1:N , CharCut 1:N . (3)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Correlation of Transcription/Translation Error", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Because CharCut is clipped above 1, we also apply clipping to utterance-level WER for stability. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Correlation of Transcription/Translation Error", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "The previous metrics estimate consistency in a fashion that is complementary to accuracy, such that it is possible to achieve good consistency despite poor accuracy. This allows trading off accuracy against consistency, depending on specific task requirements. Here, we explore a particular instance of such a task-specific trade-off that arises naturally through the formulation of a communication model. We consider a dialog situation ( \u00a72.1), and assume that communication will be successful if and only if both transcript and translation do not contain significant deviations from some reference, as motivated in Figure 3 . Conceptually, the main difference to \u00a73.3 is that here we penalize, rather than reward, the bad/bad situation ( Figure 3 ). To estimate the probability of some generated transcript and translation allowing successful communication, given reference transcript and translation, we thus require that both the transcript and the translation are sufficiently accurate. For utterance with index k:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 617, |
| "end": 625, |
| "text": "Figure 3", |
| "ref_id": "FIGREF0" |
| }, |
| { |
| "start": 740, |
| "end": 748, |
| "text": "Figure 3", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Combined Metric for Dialog Task", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "P (succ k | ref) = P (s k ok \u2229 t k ok | ref) = P (s k ok | ref) \u00d7 P (t k ok | s k , ref) \u2248 P (s k ok | ref) \u00d7 P (t k ok | ref) (4)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Combined Metric for Dialog Task", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "We then use utterance-level accuracy metrics as a proxy, computing accuracy (s", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Combined Metric for Dialog Task", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "k ) = 1\u2212 WER clipped k , accuracy (t k ) = 1\u2212CharCut k . For a test corpus of size N we compute corpus-level scores as 1 N 1\u2264k\u2264N P (succ k ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Combined Metric for Dialog Task", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "We now turn to discuss model candidates for consistent transcription and translation of speech (Figures 4-5). We hypothesize that there are two desirable model characteristics in our scenario. First, motivated by Eq. 2, models may achieve better consistency by performing joint inference, in the sense that no independence assumption between transcript and translation are made. We call this characteristic coupled inference. Second, shared representations through end-to-end (or joint) training may be of advantage in our scenario. We introduce several model variants, and also discuss whether they match these characteristics.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Models for Transcription and Translation", |
| "sec_num": "4" |
| }, |
| { |
| "text": "For a fair comparison, we keep the underlying architectural details as similar as possible across compared model types. All models are based on the attentional encoder-decoder framework (Bahdanau et al., 2015) . For audio encoders, we roughly follow Chiu et al. (2018) 's multilayer bidirectional LSTM model, which encodes log-Mel speech features that are stacked and downsampled by a factor of 3 before being consumed by the encoder. When a model requires a text encoder ( \u00a74.2), we utilize residual connections and feed-forward blocks similar to Vaswani et al. (2017) , although for simplicity we use LSTMs (Hochreiter and Schmidhuber, 1997) rather than self-attention in all encoder (and decoder) components. Similarly, decoder components use residual blocks of (unidirectional) LSTMs and feedforward components (Domhan, 2018) .", |
| "cite_spans": [ |
| { |
| "start": 186, |
| "end": 209, |
| "text": "(Bahdanau et al., 2015)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 250, |
| "end": 268, |
| "text": "Chiu et al. (2018)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 548, |
| "end": 569, |
| "text": "Vaswani et al. (2017)", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 609, |
| "end": 643, |
| "text": "(Hochreiter and Schmidhuber, 1997)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 815, |
| "end": 829, |
| "text": "(Domhan, 2018)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Basics", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "For ease of reference, we use enc(\u2022) to refer to the encoder component that transforms speech inputs (or embedded text inputs) into a hidden encoder representations, dec(\u2022) to refer to the attentional decoder component that produces hidden decoder states auto-regressively, and SoftmaxOut(\u2022) to refer to the output softmax layer that models discrete output token probabilities. We will subscript components with the parameter sets \u03c0, \u03c6 to indicate cases in which model components are separately parametrized.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Basics", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "The cascaded model ( Figure 4a ) represents ST's traditional approach of using separately trained ASR and MT models (Stentiford and Steer, 1988; Waibel et al., 1991) . Here, we use modern sequence-to-sequence ASR and MT components. CASC runs a speech input x 1:l through an ASR model", |
| "cite_spans": [ |
| { |
| "start": 116, |
| "end": 144, |
| "text": "(Stentiford and Steer, 1988;", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 145, |
| "end": 165, |
| "text": "Waibel et al., 1991)", |
| "ref_id": "BIBREF41" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 21, |
| "end": 30, |
| "text": "Figure 4a", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Cascaded Model (CASC)", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "g 1:l = enc \u03c6 (x 1:l ) u i = dec \u03c6 (u <i , g 1:l , s i\u22121 ) P (s i | s <i , x 1:l ) = SoftmaxOut \u03c6 (u i ),", |
| "eq_num": "(5)" |
| } |
| ], |
| "section": "Cascaded Model (CASC)", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "decodes the best hypothesis transcript\u015d, and then applies a separate MT model", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cascaded Model (CASC)", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "h 1:l = enc \u03c0 (\u015d) v i = dec \u03c0 (v <i , h 1:l , t i\u22121 ) P (t i | t <i ,\u015d) = SoftmaxOut \u03c0 (v i ) (6)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cascaded Model (CASC)", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "to generate a translation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cascaded Model (CASC)", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "With respect to the two desirable characteristics of a consistent model, notice that CASC uses a coupled inference procedure, in the sense that no strong independence assumptions are made between transcript and translation. CASC may therefore be a good candidate for consistent speech transcription/translation. However, it is less straightforward to apply end-to-end training to cascaded models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cascaded Model (CASC)", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "To improve over the cascaded approach, recent work has focused on end-to-end trainable models, with direct ST models being the most prototypical end-to-end model. In the following, we describe straightforward ways of extending direct models in order to apply them to our joint transcription/translation task. Note that these direct models (Figure 4b-d ) generate transcripts and translations independently at inference time. In other words, these models do not support coupled inference, which may degrade consistency between transcript and translation.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 339, |
| "end": 351, |
| "text": "(Figure 4b-d", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Direct Models", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "It is worth discussing how our consistent transcription/translation scenario relates to the issue of error propagation, an important issue in ST in which translations are degraded due to poor transcription decisions. Prior research on direct ST models has often been motivated by the observation that direct ST models elegantly avoid the error propagation problem. However, note that by shifting perspective to the joint transcription/ translation goal, error propagation loses much of its relevance. First, error propagation is usually used to describe the negative effect of intermediate decisions, but here transcripts no longer function as intermediates. Second, strategies to mitigate error propagation often seek to make translations less influenced by transcription decisions. This is in conflict with our goal of achieving consistency between transcript and translation, which calls for precisely the opposite: Transcription and translation decisions should strongly depend on each other.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Direct Models", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "A simple way of using direct modeling strategies for our purposes is to use two independent direct models, one for transcription, one for translation (Figure 4b ). Specifically, we compute", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 150, |
| "end": 160, |
| "text": "(Figure 4b", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Independent Direct Model (DIRIND)", |
| "sec_num": "4.3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "g 1:l = enc \u03c6 (x 1:l ) u i = dec \u03c6 (u <i , g 1:l , s i\u22121 ) P (s i | s <i , x 1:l ) = SoftmaxOut \u03c6 (u i ) h 1:l = enc \u03c0 (x 1:l ) v i = dec \u03c0 (v <i , h 1:l , t i\u22121 ) P (t i | t <i , x 1:l ) = SoftmaxOut \u03c0 (v i ).", |
| "eq_num": "(7)" |
| } |
| ], |
| "section": "Independent Direct Model (DIRIND)", |
| "sec_num": "4.3.1" |
| }, |
| { |
| "text": "We are not aware of prior work using independent models for transcription and translation. We include this model as a contrastive baseline for the subsequent two models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Independent Direct Model (DIRIND)", |
| "sec_num": "4.3.1" |
| }, |
| { |
| "text": "A major weakness of DIRIND is that transcription and translation models are trained separately. A better solution is to follow Weiss et al. 2017's approach and sharing the speech encoder between transcription and translation models while making use of multitask training. Compared with Eq. 7, enc \u03c6 and enc \u03c0 would be collapsed into a shared encoder (Figure 4c ). Note that originally, Weiss et al. (2017) and follow-up works use the transcript decoder only to aid training and exploit additional data for ASR as a related task in multitask learning. However, it is straight-forward to utilize the transcript decoder during inference for our purposes.", |
| "cite_spans": [ |
| { |
| "start": 386, |
| "end": 405, |
| "text": "Weiss et al. (2017)", |
| "ref_id": "BIBREF42" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 350, |
| "end": 360, |
| "text": "(Figure 4c", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Multitask Direct Model (DIRMU)", |
| "sec_num": "4.3.2" |
| }, |
| { |
| "text": "We can also take the amount of sharing to the extreme by sharing all weights, not just encoder weights. Increasing the number of shared parameters may positively impact transcription/ translation consistency. We are not aware of prior work using this model variant for performing speech translation. Compared with Eq. 7, both enc \u03c6 /enc \u03c0 and dec \u03c6 /dec \u03c0 are collapsed into a shared encoder and a shared decoder (Figure 4d ).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 413, |
| "end": 423, |
| "text": "(Figure 4d", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Shared Direct Model (DIRSH)", |
| "sec_num": "4.3.3" |
| }, |
| { |
| "text": "We previously discussed CASC as a model that features coupled inference but does not support end-to-end training. We also discussed several direct models, some of which support end-to-end training, but none of which follow a coupled inference procedure. This section introduces joint models that support both end-to-end training and coupled inference. 3", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Joint Models", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "The two-stage model (Kano et al., 2017) is conceptually close to the cascaded approach but is end-to-end trainable because continuous transcript decoder states are passed on to the translation stage. Following Sperber et al. (2019)'s formulation, we re-use Eq. (5) to model a transcript s and hidden decoder states u m 1 , and then compute", |
| "cite_spans": [ |
| { |
| "start": 20, |
| "end": 39, |
| "text": "(Kano et al., 2017)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Two-Stage Model (2ST)", |
| "sec_num": "4.4.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "v i = dec \u03c0 (v <i , u m 1 ) P (t i | t <i , u 1:m ) = SoftmaxOut \u03c0 (v i ).", |
| "eq_num": "(8)" |
| } |
| ], |
| "section": "Two-Stage Model (2ST)", |
| "sec_num": "4.4.1" |
| }, |
| { |
| "text": "Beam search is applied to decode transcripts, as well as the corresponding hidden decoder states u 1:m that are then translated. Note that in contrast to our paper, Kano et al. (2017) and Sperber et al. (2019) treat transcripts only as intermediate computations and do not report transcription accuracies.", |
| "cite_spans": [ |
| { |
| "start": 165, |
| "end": 183, |
| "text": "Kano et al. (2017)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 188, |
| "end": 209, |
| "text": "Sperber et al. (2019)", |
| "ref_id": "BIBREF37" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Two-Stage Model (2ST)", |
| "sec_num": "4.4.1" |
| }, |
| { |
| "text": "The triangle model (Anastasopoulos and Chiang, 2018) extends 2ST by adding a second attention mechanism to the translation decoder that directly attends to the encoded speech inputs. Eq. 5 is reused for transcription, and translations are computed as", |
| "cite_spans": [ |
| { |
| "start": 19, |
| "end": 52, |
| "text": "(Anastasopoulos and Chiang, 2018)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Triangle Model (TRI)", |
| "sec_num": "4.4.2" |
| }, |
| { |
| "text": "v i = dec \u03c0 (v <i , [u 1:m ; h 1:l ], t i\u22121 ) P (t i |t <i , u 1:m , x 1:l ) = SoftmaxOut \u03c0 (v i ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Triangle Model (TRI)", |
| "sec_num": "4.4.2" |
| }, |
| { |
| "text": "(9) TRI can be seen as combining DIRMU's advantage of featuring a direct connection between speech and translation, and 2ST's advantage of supporting joint inference. Anastasopoulos and Chiang (2018) evaluate both transcription and translation accuracy in a low-resource setting and report consistent improvements for the latter but less reliable gains for the former.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Triangle Model (TRI)", |
| "sec_num": "4.4.2" |
| }, |
| { |
| "text": "Haghani et al. (2018) propose a sequence-tosequence model that produces the concatenation of two outputs sequences in the context of spoken language understanding. To our knowledge it has not been utilized in an ST context before, but is a very natural fit for our joint transcription/ translation scenario. CONCAT shares both the encoder and the decoder, leading to improved compactness: Having surveyed models that are suitable for our task to various degrees, we next explore simple ways to further improve the consistency of the generated outputs through adjusting training or inference objectives.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Concatenated Model (CONCAT)", |
| "sec_num": "4.4.3" |
| }, |
| { |
| "text": "r 1:m+n := s 1 . . . s m t 1 . . . t n g 1:l = enc(x 1:l ) u i = dec(u <i , g 1:l , r i\u22121 ) P (r i | r <i , x 1:l ) = SoftmaxOut(u i ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Concatenated Model (CONCAT)", |
| "sec_num": "4.4.3" |
| }, |
| { |
| "text": "At training time, we wish to introduce a loss term that penalizes inconsistent outputs. Whereas the consistency measures discussed in \u00a73 are all defined at either the utterance or the corpus level, we define our loss term at the token level for convenient integration with the standard cross entropy loss term. For convenience, we opt to follow the notion of surface-level consistency ( \u00a73.2), according to which we may encourage models to assign probability mass to transcript (subword) tokens that appear in the translation, and to translated tokens that appear in the transcript. 4 Consider the standard cross entropy loss, which is computed against the ground-truth label distribution q(y i ) = \u03b4 y i ,y * i for predicted label y i at target position i, assigning all probability mass to the reference token y * i . We modify the ground truth label distribution for transcript and translation outputs, respectively:", |
| "cite_spans": [ |
| { |
| "start": 583, |
| "end": 584, |
| "text": "4", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Consistency as Training Objective", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "q \u2032 transl (y i ) = (1 \u2212 \u01eb)\u03b4 y i ,t i + \u01eb |s| w\u2208s \u03b4 y i ,w q \u2032 transcr (y i ) = (1 \u2212 \u01eb)\u03b4 y i ,s i + \u01eb |t| w\u2208t \u03b4 y i ,w", |
| "eq_num": "(11)" |
| } |
| ], |
| "section": "Consistency as Training Objective", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "This can be seen as an instance of non-uniform label smoothing with strength \u01eb (Szegedy et al., 2016) . In practice, we give this loss term a relative weight of 0.1 during training, while at the same time disabling label smoothing. Because this loss requires access to the complete transcript and translation, we do not apply it at inference time.", |
| "cite_spans": [ |
| { |
| "start": 79, |
| "end": 101, |
| "text": "(Szegedy et al., 2016)", |
| "ref_id": "BIBREF39" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Consistency as Training Objective", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "We can also modify the inference objective to enforce more consistent outputs. A simple way for accomplishing this is via n-best rescoring. This is especially convenient when using consistency measures such as lexical consistency ( \u00a73.1), which can be computed without referring to a gold standard. Our approach here follows two simple steps: First, we compute n-best lists using standard beam search. Second, we select the (s, t)-pair that produces the best lexical consistency score. Expectedly, this rescoring approach will yield improved consistency, while possibly degrading transcript or translation accuracy. Future work may explore ways for more explicitly balancing model and consistency scores.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Consistency as Inference Objective", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "6 Experimental Setup", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Consistency as Inference Objective", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "We conduct experiments on the MuST-C corpus (di Gangi et al., 2019), the largest publicly available ST corpus, containing TED 5 talks paired with English transcripts and translations into several languages. We present results for German, Spanish, Dutch, and Russian as the target language, where the data size is 408-504 hours of English speech, corresponding to 234K-270K utterances. In TED, translated subtitles are not displayed simultaneously with the transcribed subtitles, and consistency is therefore not inherently required in this data. In practice, however, the manual translation workflow in TED results in a sufficient level of consistency between transcripts and translations. Specifically, transcripts are generated first, and translators are required to use the transcript as a starting point while also referring to the audio. 6 We use MuST-C dev for validation and report results on tst-COMMON.", |
| "cite_spans": [ |
| { |
| "start": 843, |
| "end": 844, |
| "text": "6", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "We make use of the 40-dimensional log Mel filterbank speech features provided with the corpus. The only text preprocessing applied to the training data is subword tokenization using SentencePiece (Kudo and Richardson, 2018) with the unigram setting. Following most recent work on end-to-end ST models, we choose a relatively small vocabulary size of 1024, with transcription/translation vocabularies shared. No additional preprocessing steps are applied for training, but for transcript evaluation we remove punctuation and non-speech event markers such as (laughter), and compute case-insensitive WER. For translations, we remove non-speech markers from the decoded outputs and use SacreBleu 7 (Post, 2019) to handle tokenization and scoring.", |
| "cite_spans": [ |
| { |
| "start": 196, |
| "end": 223, |
| "text": "(Kudo and Richardson, 2018)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 695, |
| "end": 707, |
| "text": "(Post, 2019)", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model and Training Details", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "Model hyperparameters are manually tuned for the highest accuracy with DIRMU, our most relevant baseline. Unless otherwise noted, the same hyperparameters are used for all other model types. Weights for the speech encoder are initialized based on a pre-trained attentional ASR task that is identical to the ASR part of the direct multitask model. Other weights are initialized according to Glorot and Bengio (2010) . The speech encoder is a 5-layer bidirectional LSTM with 700 dimensions per direction. Attentional decoders consist of 2 Transformer blocks (Vaswani et al., 2017) but use 1024-dimensional unidirectional LSTM instead of self-attention as a sequence model, except for the CONCAT and DIRSH for which we increase to 3 layers. For CASC's MT model, encoder/decoder both contain 6 layers with 1024-dimensional LSTMs. Subword embeddings are of size 1024.", |
| "cite_spans": [ |
| { |
| "start": 390, |
| "end": 414, |
| "text": "Glorot and Bengio (2010)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 556, |
| "end": 578, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF40" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model and Training Details", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "We regularize using LSTM dropout with p = 0.3, decoder input word-type dropout (Gal and Ghahramani, 2016) , and attention dropout, both p = 0.1. We apply label smoothing with strength \u01eb = 0.1. We optimize using Adam (Kingma and Ba, 2014) with \u03b1 = 0.0005, \u03b2 1 = 0.9, \u03b2 2 = 0.98, 4000 warm-up steps, and learning rate decay by using the inverse square root of the iteration. We set the batch size dynamically based on the sentence length, such that the average batch size is 128 utterances. The training is stopped when the validation score has not improved over 3 epochs, where the validation score is the product of corpuslevel translation BLEU score and corpus-level transcription word accuracy.", |
| "cite_spans": [ |
| { |
| "start": 79, |
| "end": 105, |
| "text": "(Gal and Ghahramani, 2016)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model and Training Details", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "For decoding and generating n-best lists, we use beam size 10 and polynomial length normalization with exponent 1.5. Our implementation is based on PyTorch (Paszke et al., 2019) and xnmt (Neubig et al., 2018) , and all trainings are done using single-GPU environments, utilizing Tesla V100 GPUs with 32 GB memory.", |
| "cite_spans": [ |
| { |
| "start": 156, |
| "end": 177, |
| "text": "(Paszke et al., 2019)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 187, |
| "end": 208, |
| "text": "(Neubig et al., 2018)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model and Training Details", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "To obtain a gold standard to compare our proposed automatic consistency metrics against, we collect transcript/translation consistency ratings from Model E2E training Table 1 : Overview of models and key properties. All models except CASC/DIRIND are end-to-end (E2E) trained. Models also differ in whether translations are conditioned on transcripts (t|s), and whether conditioning is implemented through attention or through sequential decoder states.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 167, |
| "end": 174, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Human Ratings", |
| "sec_num": "6.3" |
| }, |
| { |
| "text": "t | s \u00a74.2 CASC - attention \u00a74.3.1 DIRIND - - \u00a74.3.2 DIRMU - \u00a74.3.3 DIRSH - \u00a74.4.1 TRI attention \u00a74.4.2 2ST attention \u00a74.4.3 CONCAT sequential", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Human Ratings", |
| "sec_num": "6.3" |
| }, |
| { |
| "text": "human annotators. The annotators are presented a single transcript/translation pair at a time, and are asked to judge the consistency on a 4-point Likert scale. We aimed for a balanced scale which assigned a score of 4 to cases with no or only minor mismatch, a score of 3 to indicate a purely stylistic mismatch, a score of 2 to indicate a partial semantic mismatch, and a score of 1 to a complete semantic mismatch. Instructions given to the annotators include an explanation of the definition given in \u00a72 along with a table of several examples for each of the 4 categories. We displayed transcripts and translations in randomized order, so as to obfuscate the directionality of the translation, and do not provide the source speech utterances. Annotators are recruited from an in-house pool of trusted annotators and required to be proficient English and German speakers. For each of the 2641 speech utterances in the MuST-C English-German test set, we collect annotations for 8 transcript/translation pairs: 7 system outputs produced by the models in Table 1 , and the reference transcript/translation pairs. Each transcript/translation item is rated individually and by at least three different annotators. In total, we used 58 raters to produce 63412 ratings. We fit a linear mixed-effects model on the result using the lme4 package (Bates et al., 2013) , which allows estimating the consistency of the outputs for each system, while accounting for random effects of each annotator and of each input sentence. We refer to Norman (2010) and Gibson et al. (2011) for a discussion of using mixed-effects models in the context of Likert-scale ratings.", |
| "cite_spans": [ |
| { |
| "start": 1339, |
| "end": 1359, |
| "text": "(Bates et al., 2013)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 1528, |
| "end": 1541, |
| "text": "Norman (2010)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 1546, |
| "end": 1566, |
| "text": "Gibson et al. (2011)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1055, |
| "end": 1062, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Human Ratings", |
| "sec_num": "6.3" |
| }, |
| { |
| "text": "We start by presenting empirical results across all four language pairs, and will then focus on English-German to discuss details. Table 1 contrasts the different model types that we examine.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 131, |
| "end": 138, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "7" |
| }, |
| { |
| "text": "To validate our implementation and to evaluate the overall model accuracy, Table 2 compares models across four language pairs. The table confirms that, except for DIRIND, our models obtain strong overall accuracies, as compared with prior work on the same data by Di Gangi et al. (2019). 8 Overall, CASC outperforms CONCAT and the 3 direct models in terms of WER and BLEU. 2ST/TRI achieve similar or stronger translation accuracy compared with CASC. Joint model training (used by all models except CASC and DIRIND) seems to hurt transcription accuracy somewhat, although the differences are often not statistically significant. This may be caused by an inherent trade-off between translation and transcription accuracy, as discussed by . Finally, CONCAT achieves favorable transcription accuracies, and translation accuracies fall between direct models and non-direct models in most cases. Table 2 also shows results for lexical consistency. Without exception, 2ST/TRI achieve the best results, followed by CASC and CONCAT. The direct models perform poorly in all cases. Given that CASC is by design a natural choice for joint transcription/translation, we did not necessarily expect 2ST/TRI to achieve better consistency. This encouraging evidence for the versatility of end-toend trainable models is also supported by human ratings ( \u00a77.3).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 75, |
| "end": 82, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| }, |
| { |
| "start": 890, |
| "end": 897, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Accuracy Comparison", |
| "sec_num": "7.1" |
| }, |
| { |
| "text": "To categorize models regarding inference procedure and end-to-end training (Table 1) , we observe that coupled inference (all non-direct models) is most decisive for achieving good consistency, with conditioning on generated transcripts through sequential hidden states (CONCAT) being less effective than conditioning through . 3), and the combined task-specific metric (Cmb; \u00a73.4). Bold font indicates the best score among automatic outputs. Results that are not statistically significantly worse than the best score in the same column are in italics.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 75, |
| "end": 84, |
| "text": "(Table 1)", |
| "ref_id": null |
| }, |
| { |
| "start": 326, |
| "end": 327, |
| "text": ".", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Lexical Consistency Comparison", |
| "sec_num": "7.2" |
| }, |
| { |
| "text": "attention (other non-direct models). End-to-end training also appears beneficial for consistency (CASC vs. 2ST/TRI and DIRIND vs. DIRMU/DIRSH). Table 3 presents more details for English-German and includes human ratings as gold standard, along with all four proposed automatic consistency measures. Note that the reported human ratings correspond to the intercepts of the linear mixedeffects model ( \u00a76.3). The fitted model estimates the standard deviation of the random effect for annotators at 0.28 and for input sentences at 0.37. All pairwise differences between the systems in the table are statistically significant (p < 0.01) according to an ANOVA test.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 144, |
| "end": 151, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Lexical Consistency Comparison", |
| "sec_num": "7.2" |
| }, |
| { |
| "text": "Encouragingly, lexical and surface form consistencies are aligned, and follow the same trends as the gold standard. The correlation-based measure agrees on the inferior consistency of direct models and the superior consistency of TRI, while producing slightly different orderings among the remaining models. According to our combined dialog-specific measure, TRI/2ST are tied for the best overall model. One noteworthy observation is that lexical consistency of references is far worse than for 2ST/TRI outputs. This contradicts the gold standard outputs and is possibly caused by both the system outputs and the lexical consistency score being overly literal and biased toward high-frequent outputs. For comparison against references, the surface form consistency therefore appears to be a better choice. Table 4 : Direct optimization for consistency. We compare training ( \u00a75.1) and inference ( \u00a75.2) approaches. Bold font indicates the best score. Table 4 considers the English-German translation direction, and examines the effect of using strategies for direct optimization of consistency at training and inference time ( \u00a75). All of the examined techniques improve consistency, though often at the cost of degraded accuracy. The training-time techniques appear more detrimental to transcription accuracy, and the inference-time techniques are more detrimental to translation accuracy. Although DIRMU benefits strongly from these techniques, it still falls behind TRI's consistency. For TRI, on the other hand, surface form consistency improves to the point where it almost matches the surface form consistency between reference transcripts and translations (3.594, see Table 3 ).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 806, |
| "end": 813, |
| "text": "Table 4", |
| "ref_id": null |
| }, |
| { |
| "start": 951, |
| "end": 958, |
| "text": "Table 4", |
| "ref_id": null |
| }, |
| { |
| "start": 1675, |
| "end": 1682, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Analysis of Consistency Metrics", |
| "sec_num": "7.3" |
| }, |
| { |
| "text": "Tables 2 and 3 tend to assign better consistency scores to models with higher accuracy scores. We wish to verify whether the trend is owed to the model characteristics or whether this indicates that our metrics fail to decouple accuracy and consistency. To this end, we again focus on English-German and introduce two new model variants: First, CINDP performs translation using CASC, but transcribes with an independently trained direct model. Expectedly, such a model shows high accuracy but low consistency, a hypothesis that is confirmed by results in Table 5 , contrasted against DIRMU. Second, we train a weaker 2-stage model by using only half the training data. For such a model, we would expect lower accuracy but not lower consistency, which is again confirmed by Table 5 , at least to some extent (lexical consistency is worse, but the correlation measure improves). These findings indicate that Table 5 : Consistency vs. accuracy. CINDP achieves better accuracy than DIRMU, but worse consistency scores. 2ST/2 is trained on less data than 2ST, which hurts its accuracy but not its consistency scores. accuracy and consistency are in fact reasonably well decoupled.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 555, |
| "end": 562, |
| "text": "Table 5", |
| "ref_id": null |
| }, |
| { |
| "start": 773, |
| "end": 780, |
| "text": "Table 5", |
| "ref_id": null |
| }, |
| { |
| "start": 906, |
| "end": 913, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Consistency vs. Accuracy", |
| "sec_num": "7.5" |
| }, |
| { |
| "text": "Manual inspection of the outputs of DIRMU and TRI for the English-German model confirms our intuition and the quantitative findings presented above, namely, that DIRMU suffers from considerable consistency issues due to transcripts and translations being generated separately. Examples in the decoded test data are in fact easy to spot, whereas for TRI we do find any consistency problems. Figures 6-8 show cherry-picked examples.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 390, |
| "end": 401, |
| "text": "Figures 6-8", |
| "ref_id": "FIGREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Qualitative Analysis", |
| "sec_num": "7.6" |
| }, |
| { |
| "text": "To our knowledge there exists no prior work on consistency for joint transcription and translation of speech in particular, or other multitask conditional sequence generation models in general. The closest related prior work is perhaps Ribeiro et al. (2019) , who analyze the case of contradictory model outputs in a question answering task in which multiple different but highly related questions are shown to the model. Other prior work examines the trade-off between transcription Figure 7 : Here, DIRMU makes inconsistent lexical choices for transcript and translation, leading to a correct translation despite an incorrect transcript. Figure 8 : This is an example where DIRMU produces incorrect outputs on both sides, with seemingly unrelated semantics.", |
| "cite_spans": [ |
| { |
| "start": 236, |
| "end": 257, |
| "text": "Ribeiro et al. (2019)", |
| "ref_id": "BIBREF34" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 484, |
| "end": 492, |
| "text": "Figure 7", |
| "ref_id": null |
| }, |
| { |
| "start": 640, |
| "end": 648, |
| "text": "Figure 8", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "8" |
| }, |
| { |
| "text": "and translation quality in more traditional speech translation models theoretically and empirically . Findings indicate that optimizing for WER does not necessarily lead to the best translations in a cascaded speech translation model, which is in line with the accuracy trade-offs observed in our experiment. Concurrent work explores synchronous decoding strategies for jointly transcribing and translating speech, but does not discuss the issue of consistency (Liu et al., 2020) . With regard to our consistency evaluation metrics, a closely related line of research is work on quality estimation and cross-lingual similarity metrics (Fonseca et al., 2019 ). An important difference of transcription/translation consistency is that for purposes of assessing consistency there is no directionality, and both input sequences can be erroneous. It is therefore especially important for metrics to be robust against errors on both sides. Moreover, stylistic differences are often not accounted for in this line of prior work. We note the similarity of our proposed lexical consistency metric to work by Popovi\u0107 et al. (2011) , and leave it for future work to explore whether metrics from other related work can and should be employed to measure consistency.", |
| "cite_spans": [ |
| { |
| "start": 461, |
| "end": 479, |
| "text": "(Liu et al., 2020)", |
| "ref_id": null |
| }, |
| { |
| "start": 635, |
| "end": 656, |
| "text": "(Fonseca et al., 2019", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 1099, |
| "end": 1120, |
| "text": "Popovi\u0107 et al. (2011)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "8" |
| }, |
| { |
| "text": "Finally, producing transcripts alongside translations may be framed as producing an explanation (the transcript) alongside the main output (the translation). Research on explainable machine learning systems (Smith-Renner et al., 2020 , and references therein) may shed light on desirable properties of these explanation from a usability point of view, as well as questions related to appropriate user interface design.", |
| "cite_spans": [ |
| { |
| "start": 207, |
| "end": 233, |
| "text": "(Smith-Renner et al., 2020", |
| "ref_id": "BIBREF35" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "8" |
| }, |
| { |
| "text": "This paper investigates the task of jointly transcribing and translating speech, which is relevant for use cases in which both transcripts and translations are displayed to users. The main theme has been the discussion of consistency between transcripts and translations. To this end, we proposed a notion of consistency and introduced techniques to estimate it. We conducted a thorough comparison across a wide range of models, both traditional and end-to-end trainable, with regards to both accuracy and consistency. As important model ingredients, we found that a coupled inference procedure, where translations are conditioned on transcripts through attention, is particularly helpful. We also found that end-toend training improves consistency and translations but at the cost of degraded transcripts. We further introduced training and inference techniques that are effective at further improving consistency, which we found to also come with some trade-offs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "9" |
| }, |
| { |
| "text": "Future work should examine how consistency correlates with user experience in practice and establish specific trade-offs for various use cases. Moreover, our techniques are applicable to other multitask use cases that could potentially benefit from consistent outputs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "9" |
| }, |
| { |
| "text": "Other important ST use cases do not show both transcripts at the same time, such as multilingual movie subtitling. For such cases, consistency may be less critical.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "It is worth noting that the models discussed in \u00a74.4 match our joint optimization goal exactly: P (t|s, x)P (s|x) = P (t, s|x). This is in contrast to CASC, which assumes conditional independence between translation and input speech, given the transcript. However, we do not expect this to be of major importance for purposes of generating consistent transcripts and translations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Similarly to \u00a73, this strategy targets related languages with shared alphabets, and our results for an English-German speech translation task are encouraging ( \u00a77.4). We leave it to future work to explore more elaborate solutions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "www.ted.com. 6 www.ted.com/participate/translate.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "hash: case.lc+numrefs.1+smooth.4+tok.13a+version.1.4.3.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Concurrent work(Liu et al., 2020) obtains better transcription results, but compiles its own version of the TED corpus, thus it is unclear to what extent differences can be attributed to better data filtering strategies, which are known to be a potential issue in MuST-C.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Tied Multitask Learning for Neural Speech Translation", |
| "authors": [ |
| { |
| "first": "Antonios", |
| "middle": [], |
| "last": "Anastasopoulos", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Chiang", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "North American Chapter of the Association for Computational Linguistics (NAACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N18-1008" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Antonios Anastasopoulos and David Chiang. 2018. Tied Multitask Learning for Neural Speech Translation. In North American Chapter of the Association for Computational Linguis- tics (NAACL). New Orleans, LA, USA. DOI: https://doi.org/10.18653/v1/N18 -1008", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Neural Machine Translation by Jointly Learning to Align and Translate", |
| "authors": [ |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "International Conference on Representation Learning (ICLR)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dzmitry Bahdanau, KyungHyun Cho, and Yoshua Bengio. 2015. Neural Machine Translation by Jointly Learning to Align and Translate. In International Conference on Representation Learning (ICLR). San Diego, CA, USA.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "lme4: Linear mixedeffects models using Eigen and S4", |
| "authors": [ |
| { |
| "first": "Douglas", |
| "middle": [], |
| "last": "Bates", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Maechler", |
| "suffix": "" |
| }, |
| { |
| "first": "Ben", |
| "middle": [], |
| "last": "Bolker", |
| "suffix": "" |
| }, |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Walker", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Douglas Bates, Martin Maechler, Ben Bolker, and Steven Walker. 2013. lme4: Linear mixed- effects models using Eigen and S4. R package version, 1(4).", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "State-of-the-art Speech Recognition With Sequence-to-Sequence Models", |
| "authors": [ |
| { |
| "first": "Chung-Cheng", |
| "middle": [], |
| "last": "Chiu", |
| "suffix": "" |
| }, |
| { |
| "first": "Tara", |
| "middle": [ |
| "N" |
| ], |
| "last": "Sainath", |
| "suffix": "" |
| }, |
| { |
| "first": "Yonghui", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Rohit", |
| "middle": [], |
| "last": "Prabhavalkar", |
| "suffix": "" |
| }, |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Nguyen", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhifeng", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Anjuli", |
| "middle": [], |
| "last": "Kannan", |
| "suffix": "" |
| }, |
| { |
| "first": "Ron", |
| "middle": [ |
| "J" |
| ], |
| "last": "Weiss", |
| "suffix": "" |
| }, |
| { |
| "first": "Kanishka", |
| "middle": [], |
| "last": "Rao", |
| "suffix": "" |
| }, |
| { |
| "first": "Katya", |
| "middle": [], |
| "last": "Gonina", |
| "suffix": "" |
| }, |
| { |
| "first": "Navdeep", |
| "middle": [], |
| "last": "Jaitly", |
| "suffix": "" |
| }, |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Jan", |
| "middle": [], |
| "last": "Chorowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Michiel", |
| "middle": [], |
| "last": "Bacchiani", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "International Conference on Acoustics, Speech and Signal Processing (ICASSP)", |
| "volume": "", |
| "issue": "", |
| "pages": "4774--4778", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chung-Cheng Chiu, Tara N. Sainath, Yonghui Wu, Rohit Prabhavalkar, Patrick Nguyen, Zhifeng Chen, Anjuli Kannan, Ron J. Weiss, Kanishka Rao, Katya Gonina, Navdeep Jaitly, Bo Li, Jan Chorowski, and Michiel Bacchiani. 2018. State-of-the-art Speech Recognition With Sequence-to-Sequence Models. In Interna- tional Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 4774-4778. Calgary, Canada.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Adapting Transformer to Endto-end Spoken Language Translation", |
| "authors": [ |
| { |
| "first": "Mattia", |
| "middle": [ |
| "A" |
| ], |
| "last": "Di Gangi", |
| "suffix": "" |
| }, |
| { |
| "first": "Matteo", |
| "middle": [], |
| "last": "Negri", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Turchi", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Annual Conference of the International Speech Communication Association (InterSpeech)", |
| "volume": "", |
| "issue": "", |
| "pages": "1133--1137", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mattia A. Di Gangi, Matteo Negri, and Marco Turchi. 2019. Adapting Transformer to End- to-end Spoken Language Translation. In An- nual Conference of the International Speech Communication Association (InterSpeech), pages 1133-1137. Graz, Austria.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "How Much Attention Do You Need ? A Granular Analysis of Neural Machine Translation Architectures", |
| "authors": [ |
| { |
| "first": "Tobias", |
| "middle": [], |
| "last": "Domhan", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Association for Computational Linguistic (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "1799--1808", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tobias Domhan. 2018. How Much Attention Do You Need ? A Granular Analysis of Neural Machine Translation Architectures. In Asso- ciation for Computational Linguistic (ACL), pages 1799-1808. Melbourne, Australia.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "A Simple, Fast, and Effective Reparameterization of IBM Model 2", |
| "authors": [ |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Chahuneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "North American Chapter of the Association for Computational Linguistics (NAACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "644--648", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chris Dyer, Victor Chahuneau, and Noah A. Smith. 2013. A Simple, Fast, and Effective Reparameterization of IBM Model 2. In North American Chapter of the Association for Com- putational Linguistics (NAACL), pages 644-648. Atlanta, GA, USA.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Findings of the WMT 2019 Shared Tasks on Quality Estimation", |
| "authors": [ |
| { |
| "first": "Erick", |
| "middle": [], |
| "last": "Fonseca", |
| "suffix": "" |
| }, |
| { |
| "first": "Lisa", |
| "middle": [], |
| "last": "Yankovskaya", |
| "suffix": "" |
| }, |
| { |
| "first": "Andr\u00e9", |
| "middle": [ |
| "F", |
| "T" |
| ], |
| "last": "Martins", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Fishel", |
| "suffix": "" |
| }, |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Federmann", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Conference on Machine Translation (WMT)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W19-5401" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Erick Fonseca, Lisa Yankovskaya, Andr\u00e9 F. T. Martins, Mark Fishel, and Christian Federmann. 2019. Findings of the WMT 2019 Shared Tasks on Quality Estimation. In Conference on Ma- chine Translation (WMT). Florence, Italy. DOI: https://doi.org/10.18653/v1/W19 -5401", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "A System for Simultaneous Translation of Lectures and Speeches", |
| "authors": [ |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "F\u00fcgen", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christian F\u00fcgen. 2008. A System for Simulta- neous Translation of Lectures and Speeches. PhD thesis, University of Karlsruhe.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "A Theoretically Grounded Application of Dropout in Recurrent Neural Networks", |
| "authors": [ |
| { |
| "first": "Yarin", |
| "middle": [], |
| "last": "Gal", |
| "suffix": "" |
| }, |
| { |
| "first": "Zoubin", |
| "middle": [], |
| "last": "Ghahramani", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Neural Information Processing Systems Conference (NIPS)", |
| "volume": "", |
| "issue": "", |
| "pages": "1019--1027", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yarin Gal and Zoubin Ghahramani. 2016. A Theo- retically Grounded Application of Dropout in Recurrent Neural Networks. In Neural Informa- tion Processing Systems Conference (NIPS), pages 1019-1027. Barcelona, Spain.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "MuST-C : A Multilingual Speech Translation Corpus", |
| "authors": [ |
| { |
| "first": "Antonino", |
| "middle": [], |
| "last": "Mattia Di Gangi", |
| "suffix": "" |
| }, |
| { |
| "first": "Roldano", |
| "middle": [], |
| "last": "Cattoni", |
| "suffix": "" |
| }, |
| { |
| "first": "Luisa", |
| "middle": [], |
| "last": "Bentivogli", |
| "suffix": "" |
| }, |
| { |
| "first": "Matteo", |
| "middle": [], |
| "last": "Negri", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Turchi", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "North American Chapter of the Association for Computational Linguistics (NAACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Antonino Mattia di Gangi, Roldano Cattoni, Luisa Bentivogli, Matteo Negri, and Marco Turchi. 2019. MuST-C : A Multilingual Speech Translation Corpus. In North American Chapter of the Association for Computational Linguis- tics (NAACL). Minneapolis, MN, USA.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Using Mechanical Turk to Obtain and Analyze English Acceptability Judgments", |
| "authors": [ |
| { |
| "first": "Edward", |
| "middle": [], |
| "last": "Gibson", |
| "suffix": "" |
| }, |
| { |
| "first": "Steve", |
| "middle": [], |
| "last": "Piantadosi", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Fedorenko", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Language and Linguistics Compass", |
| "volume": "5", |
| "issue": "8", |
| "pages": "509--524", |
| "other_ids": { |
| "DOI": [ |
| "10.1111/j.1749-818X.2011.00295.x" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Edward Gibson, Steve Piantadosi, and Kristina Fedorenko. 2011. Using Mechanical Turk to Obtain and Analyze English Acceptability Judg- ments. Language and Linguistics Compass, 5(8):509-524. DOI: https://doi.org /10.1111/j.1749-818X.2011.00295.x", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Understanding the Difficulty of Training Deep Feedforward Neural Networks", |
| "authors": [ |
| { |
| "first": "Xavier", |
| "middle": [], |
| "last": "Glorot", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "International Conference on Artificial Intelligence and Statistics (AISTATS)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xavier Glorot and Yoshua Bengio. 2010. Understanding the Difficulty of Training Deep Feedforward Neural Networks. In International Conference on Artificial Intelligence and Statistics (AISTATS). Sardinia, Italy.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "From Audio to Semantics: Approaches to End-to-End Spoken Language Understanding", |
| "authors": [ |
| { |
| "first": "Parisa", |
| "middle": [], |
| "last": "Haghani", |
| "suffix": "" |
| }, |
| { |
| "first": "Arun", |
| "middle": [], |
| "last": "Narayanan", |
| "suffix": "" |
| }, |
| { |
| "first": "Michiel", |
| "middle": [], |
| "last": "Bacchiani", |
| "suffix": "" |
| }, |
| { |
| "first": "Galen", |
| "middle": [], |
| "last": "Chuang", |
| "suffix": "" |
| }, |
| { |
| "first": "Neeraj", |
| "middle": [], |
| "last": "Gaur", |
| "suffix": "" |
| }, |
| { |
| "first": "Pedro", |
| "middle": [], |
| "last": "Moreno", |
| "suffix": "" |
| }, |
| { |
| "first": "Rohit", |
| "middle": [], |
| "last": "Prabhavalkar", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhongdi", |
| "middle": [], |
| "last": "Qu", |
| "suffix": "" |
| }, |
| { |
| "first": "Austin", |
| "middle": [], |
| "last": "Waters", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Spoken Language Technology Workshop (SLT)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/SLT.2018.8639043" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Parisa Haghani, Arun Narayanan, Michiel Bacchiani, Galen Chuang, Neeraj Gaur, Pedro Moreno, Rohit Prabhavalkar, Zhongdi Qu, and Austin Waters. 2018. From Audio to Semantics: Approaches to End-to-End Spoken Language Understanding. In Spoken Language Technol- ogy Workshop (SLT). Athens, Greece. DOI: https://doi.org/10.1109/SLT.2018 .8639043", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Speech Recognition, Machine Translation, and Speech Translation -A Unified Discriminative Learning Paradigm", |
| "authors": [ |
| { |
| "first": "Xiaodong", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Deng", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "IEEE Signal Processing Magazine", |
| "volume": "28", |
| "issue": "5", |
| "pages": "126--133", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/MSP.2011.941852" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiaodong He and Li Deng. 2011. Speech Re- cognition, Machine Translation, and Speech Translation -A Unified Discriminative Learn- ing Paradigm. IEEE Signal Processing Maga- zine, 28(5):126-133. DOI: https://doi .org/10.1109/MSP.2011.941852", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Why Word Error Rate Is Not a Good Metric for Speech Recognizer Training for the Speech Translation Task", |
| "authors": [ |
| { |
| "first": "Xiaodong", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Deng", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Acero", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "International Conference on Acoustics, Speech and Signal Processing (ICASSP)", |
| "volume": "", |
| "issue": "", |
| "pages": "5632--5635", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiaodong He, Li Deng, and Alex Acero. 2011. Why Word Error Rate Is Not a Good Metric for Speech Recognizer Training for the Speech Translation Task? In International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 5632-5635. Prague, Czech Republic.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Long Short-Term Memory", |
| "authors": [ |
| { |
| "first": "Sepp", |
| "middle": [], |
| "last": "Hochreiter", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00fcrgen", |
| "middle": [], |
| "last": "Schmidhuber", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Neural Computation", |
| "volume": "9", |
| "issue": "8", |
| "pages": "1735--1780", |
| "other_ids": { |
| "DOI": [ |
| "10.1162/neco.1997.9.8.1735" |
| ], |
| "PMID": [ |
| "9377276" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long Short-Term Memory. Neural Computa- tion, 9(8):1735-1780. DOI: https://doi .org/10.1162/neco.1997.9.8.1735, PMID: 9377276", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Optimizing Components for Handheld Two-Way Speech Translation for an English-Iraqi Arabic System", |
| "authors": [ |
| { |
| "first": "Roger", |
| "middle": [], |
| "last": "Hsiao", |
| "suffix": "" |
| }, |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Venugopal", |
| "suffix": "" |
| }, |
| { |
| "first": "Thilo", |
| "middle": [], |
| "last": "K\u00f6hler", |
| "suffix": "" |
| }, |
| { |
| "first": "Ting", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Paisarn", |
| "middle": [], |
| "last": "Charoenpornsawat", |
| "suffix": "" |
| }, |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Zollmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephan", |
| "middle": [], |
| "last": "Vogel", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [ |
| "W" |
| ], |
| "last": "Black", |
| "suffix": "" |
| }, |
| { |
| "first": "Tanja", |
| "middle": [], |
| "last": "Schultz", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Waibel", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Annual Conference of the International Speech Communication Association (InterSpeech)", |
| "volume": "", |
| "issue": "", |
| "pages": "765--768", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Roger Hsiao, Ashish Venugopal, Thilo K\u00f6hler, Ting Zhang, Paisarn Charoenpornsawat, Andreas Zollmann, Stephan Vogel, Alan W. Black, Tanja Schultz, and Alex Waibel. 2006. Optimizing Components for Handheld Two- Way Speech Translation for an English-Iraqi Arabic System. In Annual Conference of the International Speech Communication Associa- tion (InterSpeech), pages 765-768. Pittsburgh, PA, USA.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Structured-based Curriculum Learning for End-to-end English-Japanese Speech Translation", |
| "authors": [ |
| { |
| "first": "Takatomo", |
| "middle": [], |
| "last": "Kano", |
| "suffix": "" |
| }, |
| { |
| "first": "Sakriani", |
| "middle": [], |
| "last": "Sakti", |
| "suffix": "" |
| }, |
| { |
| "first": "Satoshi", |
| "middle": [], |
| "last": "Nakamura", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Annual Conference of the International Speech Communication Association (InterSpeech)", |
| "volume": "", |
| "issue": "", |
| "pages": "2630--2634", |
| "other_ids": { |
| "DOI": [ |
| "10.21437/Interspeech.2017-944" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Takatomo Kano, Sakriani Sakti, and Satoshi Nakamura. 2017. Structured-based Curriculum Learning for End-to-end English-Japanese Speech Translation. In Annual Conference of the International Speech Communication Association (InterSpeech), pages 2630-2634. DOI: https://doi.org/10.21437", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Adam: A Method for Stochastic Optimization", |
| "authors": [ |
| { |
| "first": "Diederik", |
| "middle": [ |
| "P" |
| ], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [ |
| "L" |
| ], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "International Conference on Learning Representations (ICLR)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik P. Kingma and Jimmy L. Ba. 2014. Adam: A Method for Stochastic Optimization. In International Conference on Learning Representations (ICLR). Banff, Canada.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Statistical Significance Tests for Machine Translation Evaluation", |
| "authors": [ |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "388--395", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philipp Koehn. 2004. Statistical Significance Tests for Machine Translation Evaluation. In Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 388-395. Barcelona, Spain.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Sentence Piece: A Simple and Language Independent Subword Tokenizer and Detokenizer For Neural Text Processing", |
| "authors": [ |
| { |
| "first": "Taku", |
| "middle": [], |
| "last": "Kudo", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Richardson", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "66--71", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D18-2012" |
| ], |
| "PMID": [ |
| "29382465" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Taku Kudo and John Richardson. 2018. Sentence Piece: A Simple and Language Independent Subword Tokenizer and Detokenizer For Neural Text Processing. Empirical Methods in Natural Language Processing (EMNLP), pages 66-71. DOI: https://doi.org /10.18653/v1/D18-2012, PMID: 29382465", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "CHARCUT: Human-Targeted Character-Based MT Evaluation with Loose Differences", |
| "authors": [ |
| { |
| "first": "Adrien", |
| "middle": [], |
| "last": "Lardilleux", |
| "suffix": "" |
| }, |
| { |
| "first": "Yves", |
| "middle": [], |
| "last": "Lepage", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "International Workshop on Spoken Language Translation (IWSLT)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adrien Lardilleux and Yves Lepage. 2017. CHARCUT: Human-Targeted Character-Based MT Evaluation with Loose Differences. In International Workshop on Spoken Language Translation (IWSLT). Tokyo, Japan.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Synchronous Speech Recognition and Speech-to-Text Translation with Interactive Decoding", |
| "authors": [ |
| { |
| "first": "Yuchen", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiajun", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Hao", |
| "middle": [], |
| "last": "Xiong", |
| "suffix": "" |
| }, |
| { |
| "first": "Long", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhongjun", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Hua", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Haifeng", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Chengqing", |
| "middle": [], |
| "last": "Zong", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Conference on Artificial Intelligence (AAAI)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1609/aaai.v34i05.6360" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yuchen Liu, Jiajun Zhang, Hao Xiong, Long Zhou, Zhongjun He, Hua Wu, Haifeng Wang, and Chengqing Zong. 2020. Synchronous Speech Recognition and Speech-to-Text Trans- lation with Interactive Decoding. In Conference on Artificial Intelligence (AAAI), New York, NY, USA. DOI: https://doi.org/10 .1609/aaai.v34i05.6360", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "XNMT: The eXtensible Neural Machine Translation Toolkit", |
| "authors": [ |
| { |
| "first": "Graham", |
| "middle": [], |
| "last": "Neubig", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthias", |
| "middle": [], |
| "last": "Sperber", |
| "suffix": "" |
| }, |
| { |
| "first": "Xinyi", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthieu", |
| "middle": [], |
| "last": "Felix", |
| "suffix": "" |
| }, |
| { |
| "first": "Austin", |
| "middle": [], |
| "last": "Matthews", |
| "suffix": "" |
| }, |
| { |
| "first": "Sarguna", |
| "middle": [], |
| "last": "Padmanabhan", |
| "suffix": "" |
| }, |
| { |
| "first": "Ye", |
| "middle": [], |
| "last": "Qi", |
| "suffix": "" |
| }, |
| { |
| "first": "Devendra", |
| "middle": [], |
| "last": "Singh Sachan", |
| "suffix": "" |
| }, |
| { |
| "first": "Philip", |
| "middle": [], |
| "last": "Arthur", |
| "suffix": "" |
| }, |
| { |
| "first": "Pierre", |
| "middle": [], |
| "last": "Godard", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Hewitt", |
| "suffix": "" |
| }, |
| { |
| "first": "Rachid", |
| "middle": [], |
| "last": "Riad", |
| "suffix": "" |
| }, |
| { |
| "first": "Liming", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Conference of the Association for Machine Translation in the Americas (AMTA) Open Source Software Showcase", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Graham Neubig, Matthias Sperber, Xinyi Wang, Matthieu Felix, Austin Matthews, Sarguna Padmanabhan, Ye Qi, Devendra Singh Sachan, Philip Arthur, Pierre Godard, John Hewitt, Rachid Riad, and Liming Wang. 2018. XNMT: The eXtensible Neural Machine Translation Toolkit. In Conference of the Association for Machine Translation in the Americas (AMTA) Open Source Software Showcase. Boston, MA, USA.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Approaches to Translation", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Newmark", |
| "suffix": "" |
| } |
| ], |
| "year": 1988, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Newmark. 1988. Approaches to Translation, Prentice Hall, Hertfordshire.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Speech Translation: Coupling of Recognition and Translation", |
| "authors": [ |
| { |
| "first": "Hermann", |
| "middle": [], |
| "last": "Ney", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "International Conference on Acoustics, Speech, and Signal Processing (ICASSP)", |
| "volume": "", |
| "issue": "", |
| "pages": "517--520", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hermann Ney. 1999. Speech Translation: Cou- pling of Recognition and Translation. In Inter- national Conference on Acoustics, Speech, and Signal Processing (ICASSP), pages 517-520. Phoenix, AZ, USA.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Likert Scales, Levels of Measurement and the \"Laws\" of Statistics", |
| "authors": [ |
| { |
| "first": "Geoff", |
| "middle": [], |
| "last": "Norman", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Advances in Health Sciences Education", |
| "volume": "15", |
| "issue": "5", |
| "pages": "625--632", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Geoff Norman. 2010. Likert Scales, Levels of Measurement and the \"Laws\" of Statistics. Advances in Health Sciences Education, 15(5):625-632.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "BLEU: A Method For Automatic Evaluation of Machine Translation", |
| "authors": [ |
| { |
| "first": "Kishore", |
| "middle": [], |
| "last": "Papineni", |
| "suffix": "" |
| }, |
| { |
| "first": "Salim", |
| "middle": [], |
| "last": "Roukos", |
| "suffix": "" |
| }, |
| { |
| "first": "Todd", |
| "middle": [], |
| "last": "Ward", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei-Jing", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Association for Computational Linguistic (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "311--318", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/1073083.1073135" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002. BLEU: A Method For Automatic Evaluation of Machine Transla- tion. In Association for Computational Lin- guistic (ACL), pages 311-318. Philadephia, PA, USA. DOI: https://doi.org/10 .3115/1073083.1073135", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "PyTorch: An Imperative Style, High-Performance Deep Learning Library", |
| "authors": [ |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Paszke", |
| "suffix": "" |
| }, |
| { |
| "first": "Sam", |
| "middle": [], |
| "last": "Gross", |
| "suffix": "" |
| }, |
| { |
| "first": "Francisco", |
| "middle": [], |
| "last": "Massa", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Lerer", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Bradbury", |
| "suffix": "" |
| }, |
| { |
| "first": "Gregory", |
| "middle": [], |
| "last": "Chanan", |
| "suffix": "" |
| }, |
| { |
| "first": "Trevor", |
| "middle": [], |
| "last": "Killeen", |
| "suffix": "" |
| }, |
| { |
| "first": "Zeming", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Natalia", |
| "middle": [], |
| "last": "Gimelshein", |
| "suffix": "" |
| }, |
| { |
| "first": "Luca", |
| "middle": [], |
| "last": "Antiga", |
| "suffix": "" |
| }, |
| { |
| "first": "Alban", |
| "middle": [], |
| "last": "Desmaison", |
| "suffix": "" |
| }, |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "K\u00f6pf", |
| "suffix": "" |
| }, |
| { |
| "first": "Edward", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zach", |
| "middle": [], |
| "last": "Devito", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Raison", |
| "suffix": "" |
| }, |
| { |
| "first": "Alykhan", |
| "middle": [], |
| "last": "Tejani", |
| "suffix": "" |
| }, |
| { |
| "first": "Sasank", |
| "middle": [], |
| "last": "Chilamkurthy", |
| "suffix": "" |
| }, |
| { |
| "first": "Benoit", |
| "middle": [], |
| "last": "Steiner", |
| "suffix": "" |
| }, |
| { |
| "first": "Lu", |
| "middle": [], |
| "last": "Fang", |
| "suffix": "" |
| }, |
| { |
| "first": "Junjie", |
| "middle": [], |
| "last": "Bai", |
| "suffix": "" |
| }, |
| { |
| "first": "Soumith", |
| "middle": [], |
| "last": "Chintala", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Advances in Neural Information Processing Systems (NeurIPS)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas K\u00f6pf, Edward Yang, Zach DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. 2019. PyTorch: An Imperative Style, High-Performance Deep Learning Library. In Advances in Neural Information Processing Systems (NeuIPS).", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Evaluation Without References: IBM1 Scores as Evaluation Metrics", |
| "authors": [ |
| { |
| "first": "Maja", |
| "middle": [], |
| "last": "Popovi\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Vilar", |
| "suffix": "" |
| }, |
| { |
| "first": "Eleftherios", |
| "middle": [], |
| "last": "Avramidis", |
| "suffix": "" |
| }, |
| { |
| "first": "Aljoscha", |
| "middle": [], |
| "last": "Burchardt", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the Sixth Workshop on Statistical Machine Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "99--103", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Maja Popovi\u0107, David Vilar, Eleftherios Avramidis, Aljoscha Burchardt, and Maja Popovi\u0107. 2011. Evaluation Without References: IBM1 Scores as Evaluation Metrics. In Pro- ceedings of the Sixth Workshop on Sta- tistical Machine Translation, pages 99-103. Edinburgh, Scotland.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "A Call for Clarity in Reporting BLEU Scores", |
| "authors": [ |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Post", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Conference on Machine Translation (WMT)", |
| "volume": "", |
| "issue": "", |
| "pages": "186--191", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W18-6319" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matt Post. 2019. A Call for Clarity in Reporting BLEU Scores. In Conference on Machine Translation (WMT), pages 186-191. Brussels, Belgium. DOI: https://doi.org/10 .18653/v1/W18-6319", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Are Red Roses Red? Evaluating Consistency of Question-Answering Models", |
| "authors": [ |
| { |
| "first": "Marco", |
| "middle": [ |
| "Tulio" |
| ], |
| "last": "Ribeiro", |
| "suffix": "" |
| }, |
| { |
| "first": "Carlos", |
| "middle": [], |
| "last": "Guestrin", |
| "suffix": "" |
| }, |
| { |
| "first": "Sameer", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Association for Computational Linguistic (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "6174--6184", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P19-1621" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marco Tulio Ribeiro, Carlos Guestrin, and Sameer Singh. 2019. Are Red Roses Red? Evaluating Consistency of Question-Answering Models. In Association for Computational Linguistic (ACL), pages 6174-6184. Florence, Italy. DOI: https://doi.org/10.18653/v1/P19 -1621", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "No Explainability without Accountability: An Empirical Study of Explanations and Feedback in Interactive ML", |
| "authors": [ |
| { |
| "first": "Alison", |
| "middle": [], |
| "last": "Smith-Renner", |
| "suffix": "" |
| }, |
| { |
| "first": "Ron", |
| "middle": [], |
| "last": "Fan", |
| "suffix": "" |
| }, |
| { |
| "first": "Melissa", |
| "middle": [], |
| "last": "Birchfield", |
| "suffix": "" |
| }, |
| { |
| "first": "Tongshuang", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jordan", |
| "middle": [], |
| "last": "Boydgraber", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [ |
| "S" |
| ], |
| "last": "Weld", |
| "suffix": "" |
| }, |
| { |
| "first": "Leah", |
| "middle": [], |
| "last": "Findlater", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Conference on Human factors in computing systems (CHI)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/3313831.3376624" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alison Smith-Renner, Ron Fan, Melissa Birchfield, Tongshuang Wu, Jordan Boyd- graber, Daniel S. Weld, and Leah Findlater. 2020. No Explainability without Account- ability: An Empirical Study of Explanations and Feedback in Interactive ML. In Confer- ence on Human factors in computing systems (CHI). Honolulu, HI, USA. DOI: https:// doi.org/10.1145/3313831.3376624", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Neural Lattice-to-Sequence Models for Uncertain Inputs", |
| "authors": [ |
| { |
| "first": "Matthias", |
| "middle": [], |
| "last": "Sperber", |
| "suffix": "" |
| }, |
| { |
| "first": "Graham", |
| "middle": [], |
| "last": "Neubig", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1380--1389", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D17-1145" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthias Sperber, Graham Neubig, Jan Niehues, and Alex Waibel. 2017. Neural Lattice-to- Sequence Models for Uncertain Inputs. In Conference on Empirical Methods in Na- tural Language Processing (EMNLP), pages 1380-1389. Copenhagen, Denmark. DOI: https://doi.org/10.18653/v1 /D17-1145", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Attention-Passing Models for Robust and Data-Efficient End-to-End Speech Translation", |
| "authors": [ |
| { |
| "first": "Matthias", |
| "middle": [], |
| "last": "Sperber", |
| "suffix": "" |
| }, |
| { |
| "first": "Graham", |
| "middle": [], |
| "last": "Neubig", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1162/tacl_a_00270" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthias Sperber, Graham Neubig, Jan Niehues, and Alex Waibel. 2019. Attention-Passing Models for Robust and Data-Efficient End-to- End Speech Translation. Transactions of the Association for Computational Linguistics (TACL). DOI: https://doi.org/10.1162 /tacl a 00270", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Machine Translation of Speech", |
| "authors": [ |
| { |
| "first": "Fred", |
| "middle": [ |
| "W", |
| "M" |
| ], |
| "last": "Stentiford", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "G" |
| ], |
| "last": "Steer", |
| "suffix": "" |
| } |
| ], |
| "year": 1988, |
| "venue": "British Telecom Technology Journal", |
| "volume": "6", |
| "issue": "2", |
| "pages": "116--123", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fred W. M. Stentiford and M. G. Steer. 1988. Machine Translation of Speech. British Telecom Technology Journal, 6(2):116-123.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Rethinking the Inception Architecture for Computer Vision", |
| "authors": [ |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Szegedy", |
| "suffix": "" |
| }, |
| { |
| "first": "Vincent", |
| "middle": [], |
| "last": "Vanhoucke", |
| "suffix": "" |
| }, |
| { |
| "first": "Sergey", |
| "middle": [], |
| "last": "Ioffe", |
| "suffix": "" |
| }, |
| { |
| "first": "Jon", |
| "middle": [], |
| "last": "Shlens", |
| "suffix": "" |
| }, |
| { |
| "first": "Zbigniew", |
| "middle": [], |
| "last": "Wojna", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Computer Vision and Pattern Recognition (CVPR)", |
| "volume": "", |
| "issue": "", |
| "pages": "2818--2826", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jon Shlens, and Zbigniew Wojna. 2016. Rethinking the Inception Architecture for Com- puter Vision. In Computer Vision and Pattern Recognition (CVPR), pages 2818-2826. Las Vegas, NV, USA.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Attention Is All You Need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "\u0141ukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Neural Information Processing Systems Conference (NIPS)", |
| "volume": "", |
| "issue": "", |
| "pages": "5998--6008", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention Is All You Need. In Neural Infor- mation Processing Systems Conference (NIPS), pages 5998-6008. Long Beach, CA, USA.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "JANUS: A Speech-to-Speech Translation System Using Connectionist and Symbolic Processing Strategies", |
| "authors": [ |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Waibel", |
| "suffix": "" |
| }, |
| { |
| "first": "Ajay", |
| "middle": [ |
| "N" |
| ], |
| "last": "Jain", |
| "suffix": "" |
| }, |
| { |
| "first": "Arthur", |
| "middle": [ |
| "E" |
| ], |
| "last": "Mcnair", |
| "suffix": "" |
| }, |
| { |
| "first": "Hiroaki", |
| "middle": [], |
| "last": "Saito", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [ |
| "G" |
| ], |
| "last": "Hauptmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Joe", |
| "middle": [], |
| "last": "Tebelskis", |
| "suffix": "" |
| } |
| ], |
| "year": 1991, |
| "venue": "International Conference on Acoustics, Speech, and Signal Processing (ICASSP)", |
| "volume": "", |
| "issue": "", |
| "pages": "793--796", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/ICASSP.1991.150456" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alex Waibel, Ajay N. Jain, Arthur E. McNair, Hiroaki Saito, Alexander G. Hauptmann, and Joe Tebelskis. 1991. JANUS: A Speech-to- Speech Translation System Using Connec- tionist and Symbolic Processing Strategies. In International Conference on Acoustics, Speech, and Signal Processing (ICASSP), pages 793-796. Toronto, Canada. DOI: https://doi.org/10.1109/ICASSP .1991.150456", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "Sequence-to-Sequence Models Can Directly Transcribe Foreign Speech", |
| "authors": [ |
| { |
| "first": "Ron", |
| "middle": [ |
| "J" |
| ], |
| "last": "Weiss", |
| "suffix": "" |
| }, |
| { |
| "first": "Jan", |
| "middle": [], |
| "last": "Chorowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Navdeep", |
| "middle": [], |
| "last": "Jaitly", |
| "suffix": "" |
| }, |
| { |
| "first": "Yonghui", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhifeng", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Annual Conference of the International Speech Communication Association (InterSpeech)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.21437/Interspeech.2017-503" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ron J. Weiss, Jan Chorowski, Navdeep Jaitly, Yonghui Wu, and Zhifeng Chen. 2017. Sequence-to-Sequence Models Can Directly Transcribe Foreign Speech. In Annual Confer- ence of the International Speech Communica- tion Association (InterSpeech). Stockholm, Sweden. DOI: https://doi.org/10 .21437/Interspeech.2017-503", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "Dialog use case. Whenever the transcript or the translation has errors, additional effort is needed.", |
| "uris": null, |
| "type_str": "figure", |
| "num": null |
| }, |
| "FIGREF1": { |
| "text": "Cascaded and direct model types.", |
| "uris": null, |
| "type_str": "figure", |
| "num": null |
| }, |
| "FIGREF2": { |
| "text": "Joint models, featuring both coupled inference and end-to-end training.", |
| "uris": null, |
| "type_str": "figure", |
| "num": null |
| }, |
| "FIGREF4": { |
| "text": "Example for inconsistently spelled names and an inconsistent function word when generating transcript and translation separately using DIRMU.", |
| "uris": null, |
| "type_str": "figure", |
| "num": null |
| }, |
| "TABREF2": { |
| "text": "Comparison of WER, BLEU, lexical consistency (Lex; \u00a73.1) across several language pairs. We compare against state-of-the-art (SOTA) results under same data conditions by Di Gangi et al.(2019), where cc denotes a cascaded model, dir denotes a direct model. Bold font indicates the best score. Results that are not statistically significantly worse than the best score in the same column are in italics (pairwise bootstrap resampling(Koehn, 2004), p<0.05).", |
| "html": null, |
| "content": "<table><tr><td/><td/><td>Transcript</td><td colspan=\"2\">Translation</td><td/><td/><td colspan=\"2\">Consistency</td><td/></tr><tr><td>Model</td><td>Params.</td><td>\u2193 WER</td><td>\u2191 BLEU</td><td>\u2193 CharCut</td><td>\u2193 Lex</td><td>\u2191 Sur</td><td>\u2191 Cor</td><td>\u2191 Cmb</td><td>\u2191 Human</td></tr><tr><td>CASC</td><td>223M</td><td>21.6</td><td>19.2</td><td>47.2</td><td>10.36</td><td>10.65</td><td>0.396</td><td>0.474</td><td>3.119</td></tr><tr><td>DIRIND</td><td>175M</td><td>21.6</td><td>11.0</td><td>60.3</td><td>21.13</td><td>5.24</td><td>0.346</td><td>0.374</td><td>2.195</td></tr><tr><td>DIRMU</td><td>124M</td><td>23.6</td><td>18.4</td><td>48.7</td><td>13.89</td><td>7.07</td><td>0.376</td><td>0.457</td><td>2.715</td></tr><tr><td>DIRSH</td><td>106M</td><td>23.6</td><td>19.0</td><td>47.9</td><td>14.71</td><td>8.54</td><td>0.371</td><td>0.464</td><td>2.776</td></tr><tr><td>2ST</td><td>122M</td><td>22.2</td><td>20.1</td><td>46.1</td><td>9.86</td><td>12.08</td><td>0.391</td><td>0.484</td><td>3.170</td></tr><tr><td>TRI</td><td>141M</td><td>22.2</td><td>19.9</td><td>46.3</td><td>9.72</td><td>11.54</td><td>0.414</td><td>0.484</td><td>3.192</td></tr><tr><td>CONCAT</td><td>106M</td><td>21.9</td><td>19.2</td><td>47.1</td><td>12.79</td><td>9.60</td><td>0.387</td><td>0.477</td><td>2.875</td></tr><tr><td>Reference</td><td>-</td><td>0</td><td>100</td><td>0</td><td>12.6</td><td>13.3</td><td>1</td><td>1</td><td>3.594</td></tr></table>", |
| "type_str": "table", |
| "num": null |
| }, |
| "TABREF3": { |
| "text": "", |
| "html": null, |
| "content": "<table><tr><td>: Detailed consistency results, including surface form consistency (Sur; \u00a73.2), correlation of</td></tr><tr><td>error (Cor; \u00a73</td></tr></table>", |
| "type_str": "table", |
| "num": null |
| } |
| } |
| } |
| } |