| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T10:38:33.958149Z" |
| }, |
| "title": "IST-Unbabel 2021 Submission for the Explainable Quality Estimation Shared Task", |
| "authors": [ |
| { |
| "first": "Marcos", |
| "middle": [ |
| "V" |
| ], |
| "last": "Treviso", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Instituto de Telecomunica\u00e7\u00f5es", |
| "location": { |
| "settlement": "Lisbon", |
| "country": "Portugal" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Nuno", |
| "middle": [ |
| "M" |
| ], |
| "last": "Guerreiro", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Instituto de Telecomunica\u00e7\u00f5es", |
| "location": { |
| "settlement": "Lisbon", |
| "country": "Portugal" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Ricardo", |
| "middle": [], |
| "last": "Rei", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Instituto Superior T\u00e9cnico", |
| "location": { |
| "settlement": "Lisbon", |
| "country": "Portugal" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Andr\u00e9", |
| "middle": [ |
| "F T" |
| ], |
| "last": "Martins", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Instituto de Telecomunica\u00e7\u00f5es", |
| "location": { |
| "settlement": "Lisbon", |
| "country": "Portugal" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "We present the joint contribution of Instituto Superior T\u00e9cnico (IST) and Unbabel to the Explainable Quality Estimation (QE) shared task, where systems were submitted to two tracks: constrained (without word-level supervision) and unconstrained (with word-level supervision). For the constrained track, we experimented with several explainability methods to extract the relevance of input tokens from sentence-level QE models built on top of multilingual pre-trained transformers. Among the different tested methods, composing explanations in the form of attention weights scaled by the norm of value vectors yielded the best results. When word-level labels are used during training, our best results were obtained by using word-level predicted probabilities. We further improve the performance of our methods on the two tracks by ensembling explanation scores extracted from models trained with different pre-trained transformers, achieving strong results for in-domain and zero-shot language pairs.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "We present the joint contribution of Instituto Superior T\u00e9cnico (IST) and Unbabel to the Explainable Quality Estimation (QE) shared task, where systems were submitted to two tracks: constrained (without word-level supervision) and unconstrained (with word-level supervision). For the constrained track, we experimented with several explainability methods to extract the relevance of input tokens from sentence-level QE models built on top of multilingual pre-trained transformers. Among the different tested methods, composing explanations in the form of attention weights scaled by the norm of value vectors yielded the best results. When word-level labels are used during training, our best results were obtained by using word-level predicted probabilities. We further improve the performance of our methods on the two tracks by ensembling explanation scores extracted from models trained with different pre-trained transformers, achieving strong results for in-domain and zero-shot language pairs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Quality estimation (QE) aims at assessing the quality of a translation system without relying on reference translations (Blatz et al., 2004; Specia et al., 2018) . This paper describes the joint contribution of Instituto Superior T\u00e9cnico (IST) and Unbabel to the Explainable Quality Estimation shared task (Fomicheva et al., 2021a) . The goal of the shared task is to identify translation errors without direct word-level supervision (constrained track) or with access to word-level labels (unconstrained track).", |
| "cite_spans": [ |
| { |
| "start": 120, |
| "end": 140, |
| "text": "(Blatz et al., 2004;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 141, |
| "end": 161, |
| "text": "Specia et al., 2018)", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 306, |
| "end": 331, |
| "text": "(Fomicheva et al., 2021a)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Recent advances in QE have led to consistent improvements at predicting quality assessments such as Direct Assessments (DAs, Graham et al. 2013) . Traditional QE systems had to predict Human Translation Error Rate (HTER, Snover et al. 2006 ), yet with the advent of neural machine translation, we observed a shift from fluency into ade-quacy errors (Martindale and Carpuat, 2018) . For that reason, DAs started getting used as the groundtruth score for assessing the quality of translations . However, with DAs we lose the ability to generate word-level supervision, impacting the interpretability of sentence-level predictions in terms of lower granularity elements such as word-level translation errors.", |
| "cite_spans": [ |
| { |
| "start": 125, |
| "end": 144, |
| "text": "Graham et al. 2013)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 214, |
| "end": 239, |
| "text": "(HTER, Snover et al. 2006", |
| "ref_id": null |
| }, |
| { |
| "start": 349, |
| "end": 379, |
| "text": "(Martindale and Carpuat, 2018)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "At the same time, state-of-the-art QE systems such as OpenKiwi (Kepler et al., 2019b) and Tran-sQuest (Ranasinghe et al., 2020b ) build on top of multilingual pre-trained models such as BERT (Devlin et al., 2019) and XLM-RoBERTa (Conneau et al., 2020) , which are largely responsible for the performance boost we have observed in the last two editions of the WMT QE shared task (Fonseca et al., 2019; . Due to the usage of such overparametrized black-box models, this performance boost also comes at the cost of efficiency and interpretability.", |
| "cite_spans": [ |
| { |
| "start": 63, |
| "end": 85, |
| "text": "(Kepler et al., 2019b)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 102, |
| "end": 127, |
| "text": "(Ranasinghe et al., 2020b", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 191, |
| "end": 212, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 229, |
| "end": 251, |
| "text": "(Conneau et al., 2020)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 378, |
| "end": 400, |
| "text": "(Fonseca et al., 2019;", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Research in explainable NLP uncovered several strategies to interpret models' decisions, either in a post-hoc manner by querying a trained model for extracting perturbation or gradient measures (Ribeiro et al., 2016; Arras et al., 2016) , or by building models that are inherently interpretable (Lei et al., 2016; Chang et al., 2020) . Recent works have also put transformers under the lens of explainability, aiming at unraveling interpretable patterns that clarify how decisions emerge from attention heads and across hidden states at each layer (De Cao et al., 2020; Abnar and Zuidema, 2020; Voita et al., 2021) .", |
| "cite_spans": [ |
| { |
| "start": 194, |
| "end": 216, |
| "text": "(Ribeiro et al., 2016;", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 217, |
| "end": 236, |
| "text": "Arras et al., 2016)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 295, |
| "end": 313, |
| "text": "(Lei et al., 2016;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 314, |
| "end": 333, |
| "text": "Chang et al., 2020)", |
| "ref_id": null |
| }, |
| { |
| "start": 548, |
| "end": 569, |
| "text": "(De Cao et al., 2020;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 570, |
| "end": 594, |
| "text": "Abnar and Zuidema, 2020;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 595, |
| "end": 614, |
| "text": "Voita et al., 2021)", |
| "ref_id": "BIBREF42" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this shared task, we experiment with several of these methods to extract the relevance of input tokens from sentence-level QE models built on top of multilingual pre-trained transformers 1 . For the constrained track, where models are unaware of word-level supervision, our best results were de-rived from attention-based explanations. When we used word-level labels during training, the best results were obtained by using word-level predicted probabilities. Furthermore, we were able to push the performance further by ensembling explanations for both tracks.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Quality Estimation. QE systems are usually designed according to the granularity in which predictions are made: word, sentence, or document-level. The goal of word-level QE is to assign quality labels (OK or BAD) to each machine-translated word, indicating whether that word is a translation error or not. Additionally, current systems also classify source words to denote words in the original sentence that have been mistranslated or omitted in the target. On the other hand, sentence-level QE aims at predicting the quality of the whole translated sentence, either in terms of how many edit operations are required to fix it (HTER) or in terms of human judgments (DA). Similarly, document-level QE systems predict a single outcome (a real score or a ranking index) for an entire document.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Transformers. The multi-head attention mechanism is the bedrock on which transformers are built. They are responsible for contextualizing the information within and across input sentences dynamically (Vaswani et al., 2017) . Concretely, given as input a matrix Q \u2208 R n\u00d7d containing ddimensional representations for n queries, and matrices K, V \u2208 R m\u00d7d for m keys and values, the scaled dot-product attention at a single head is computed as:", |
| "cite_spans": [ |
| { |
| "start": 200, |
| "end": 222, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF41" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "att(Q, K, V ) = \u03c0 QK \u221a d Z\u2208R n\u00d7m V \u2208 R n\u00d7d . (1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The \u03c0 transformation maps rows to distributions, with softmax being the most common choice, \u03c0(Z) ij = softmax(z i ) j . Multi-head attention is computed by evoking Eq. 1 in parallel for each head h:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "head h (Q, K, V ) = att(QW Q h , KW K h , V W V h ),", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "where", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "W Q h , W K h , W V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "h are learned linear transformations. The output of the multi-head attention module is the concatenation of all k heads followed by a learnable linear transformation W O :", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "mh-att(Q, K, V ) = concat(head 1 , ..., head k )W O .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "This way, heads have the capability of learning specialized phenomena. Transformers with only encoder-blocks, such as BERT (Devlin et al., 2019) and XLM-RoBERTa (Conneau et al., 2020) , have only the encoder self-attention, and thus m = n.", |
| "cite_spans": [ |
| { |
| "start": 123, |
| "end": 144, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 161, |
| "end": 183, |
| "text": "(Conneau et al., 2020)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Explainability in NLP. There is a large body of work on the analysis and interpretation of models in NLP. Some of these models are built on top of attention mechanisms, which automatically learn a weighted representation of input features. Attention weights provide plausible, but not always faithful, explanations (Jain and Wallace, 2019; Wiegreffe and Pinter, 2019) . In contrast, rationalizers with hard attention are arguably more faithful but require stochastic networks (Lei et al., 2016; Bastings et al., 2019) , with recent works avoiding stochasticity via sparse deterministic selections (Guerreiro and Martins, 2021). Other approaches seek local explanations by considering gradient measures (Arras et al., 2016; Bastings and Filippova, 2020) , or by perturbing the input and querying the classifier in a post-hoc manner (Ribeiro et al., 2016; Kim et al., 2020) . Since transformers are composed of several layers and attention heads, many works analyze and improve the multi-head attention mechanism directly to produce better explanations (Kobayashi et al., 2020; Hao et al., 2021) . More elaborated methods consider the entire flow of information coming from attention weights, hidden states, or gradients to interpret the model's decision (De Cao et al., 2020; Abnar and Zuidema, 2020; Voita et al., 2021) .", |
| "cite_spans": [ |
| { |
| "start": 315, |
| "end": 339, |
| "text": "(Jain and Wallace, 2019;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 340, |
| "end": 367, |
| "text": "Wiegreffe and Pinter, 2019)", |
| "ref_id": "BIBREF43" |
| }, |
| { |
| "start": 476, |
| "end": 494, |
| "text": "(Lei et al., 2016;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 495, |
| "end": 517, |
| "text": "Bastings et al., 2019)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 702, |
| "end": 722, |
| "text": "(Arras et al., 2016;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 723, |
| "end": 752, |
| "text": "Bastings and Filippova, 2020)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 831, |
| "end": 853, |
| "text": "(Ribeiro et al., 2016;", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 854, |
| "end": 871, |
| "text": "Kim et al., 2020)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 1051, |
| "end": 1075, |
| "text": "(Kobayashi et al., 2020;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 1076, |
| "end": 1093, |
| "text": "Hao et al., 2021)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 1253, |
| "end": 1274, |
| "text": "(De Cao et al., 2020;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 1275, |
| "end": 1299, |
| "text": "Abnar and Zuidema, 2020;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 1300, |
| "end": 1319, |
| "text": "Voita et al., 2021)", |
| "ref_id": "BIBREF42" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The goal of the constrained track is to identify machine translation errors without explicit word-level annotation. More precisely, it aims at performing word-level quality estimation by casting the task as a prediction explainability problem. In the context of QE, explanations can be seen as highlights, representing the relevance of input words w.r.t. the model's prediction via continuous scores. We next describe the datasets, models, and explainability methods that we used for this track.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Constrained Track", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Seeking to improve the performance of our models on the zero-shot language pairs (LPs), we used all language pairs from the MLQE-PE dataset ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "\u03b1 \u22c5 h src (1 -\u03b1) \u22c5 h hyp + Feed-forward Sentence score (L \u00d7 N \u00d7 D) (N src \u00d7 D) (N hyp \u00d7 D) (1 \u00d7 D) (1 \u00d7 D) (1 \u00d7 D) (1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Figure 1: General architecture of our models for the constrained track. L represents the number of layers. N src and N hyp represent the number of words in the source and hypothesis sentences, respectively. N = N src + N hyp is the number of words after concatenating the two sentences. D is the size of hidden vectors.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "for both tracks. For RO-EN and ET-EN, we evaluated our models on the validation set of these LPs. For the two zero-shot LPs, DE-ZH and RU-DE, we used the 20 sentences made available by the shared task and the validation sets of EN-ZH and EN-DE to improve the robustness of the evaluation of explanations w.r.t. the target language. We used word-level labels to train word-level models for the unconstrained track only. For sentence-level models, we supervise our models using DA scores.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Since QE is a fundamental tool in many MT pipelines, we focus our efforts on designing and explaining QE systems with high sentence-level performance. Therefore, we opted to follow the recent trend in this area (Kepler et al., 2019b; Ranasinghe et al., 2020a) and employed two pre-trained multilingual language models as the feature extractors for our models: XLM-RoBERTa and RemBERT.", |
| "cite_spans": [ |
| { |
| "start": 211, |
| "end": 233, |
| "text": "(Kepler et al., 2019b;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 234, |
| "end": 259, |
| "text": "Ranasinghe et al., 2020a)", |
| "ref_id": "BIBREF31" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence-level Models", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The overall architecture of our models is shown in Figure 1 . The tokenized source s = s 1 , ..., s N and hypothesis t = t 1 , ..., t M sentences are concatenated and passed as input to the encoder, which produces hidden state vectors H 0 , ..., H L for each layer 0 \u2264 \u2264 L, where H i \u2208 R (N +M )\u00d7d . Next, all hidden states are fed to a scalar mix module (Peters et al., 2018) that learns a weighted sum of the hidden states of each layer of the encoder, producing a new sequence of aggregated hidden states H L+1 . We split H L+1 into source H src \u2208 R N \u00d7d and hypothesis hidden states H hyp \u2208 R M \u00d7d , which are independently passed to an average pooling layer to get their sentence representations h src and h hyp . We merge both representations via a convex combination with \u03b1 = 0.5 to encourage the model to use both source and hypothesis contexts. Finally, we pass the combined vector to a 2-layered feed-forward module in order to get a sentence score prediction\u0177 \u2208 R. Moreover, attention matrices A 1 , ..., A L are also recovered as a by-product of the forward propagation, where +M ) . The hyperparameters used for training can be found in \u00a7B.", |
| "cite_spans": [ |
| { |
| "start": 1089, |
| "end": 1093, |
| "text": "+M )", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 51, |
| "end": 59, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Sentence-level Models", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "A i \u2208 R (N +M )\u00d7(N", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence-level Models", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "XLM-RoBERTa as encoder. We set a XLM-RoBERTa Large (XLM-R, Conneau et al. 2020) as the encoder layer. 2 XLM-R is a cross-lingual transformer pre-trained on massive amounts of multilingual data. It consists of 24 encoder blocks with 16 attention heads each. Following we train our complete model on DAs by using adapters for the XLM-R encoder (Houlsby et al., 2019; Pfeiffer et al., 2020) to adapt it to the domain specific data of the QE task with minimal training effort.", |
| "cite_spans": [ |
| { |
| "start": 59, |
| "end": 79, |
| "text": "Conneau et al. 2020)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 342, |
| "end": 364, |
| "text": "(Houlsby et al., 2019;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 365, |
| "end": 387, |
| "text": "Pfeiffer et al., 2020)", |
| "ref_id": "BIBREF30" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence-level Models", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "XLM-RoBERTa for zero-shot LPs. To improve the robustness of XLM-R on out-of-domain data, we used an XLM-RoBERTa Large model that was trained with DA's from the metrics shared task. 3 Next, we set it as the encoder layer, and adapted it for predicting DAs from the MLQE corpus as in . Altogether, the data from the Metrics shared task encompasses 30 language pairs from the news domain-yet, the zeroshot LPs are not included in this set. The hyperparameters and the training regime of this model are the same as the previously described XLM-R. We denote this model as XLM-R-M from here on.", |
| "cite_spans": [ |
| { |
| "start": 181, |
| "end": 182, |
| "text": "3", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence-level Models", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "RemBERT as encoder. We replace the XLM-R by a RemBERT model as the encoder layer (Chung et al., 2021). 4 Multilingual BERT (Devlin et al., 2019) has been shown to provide complementary performance to XLM-based models for sentencelevel and word-level QE (Kepler et al., 2019a) . We opted to use RemBERT since it can be seen as a larger multilingual BERT with decoupled input and output embeddings, which helps to accelerate training. It consists of 32 encoder blocks with 18 attention heads each. Rather than aggregating layers with the scalar mix layer, we perform average pooling over the hidden states of the last layer of RemBERT. For training, we simply finetune the whole model with small learning rates.", |
| "cite_spans": [ |
| { |
| "start": 123, |
| "end": 144, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 253, |
| "end": 275, |
| "text": "(Kepler et al., 2019a)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence-level Models", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Results. Table 1 summarizes the performance of our sentence-level models on the validation set in terms of Pearson correlation for each language pair evaluated in the shared task. For completeness, we show results for the 20 sentences made available by the shared task for DE-ZH and RU-DE. We also include OpenKiwi with a XLM-R Large as the encoder for comparison. We note that results for DE-ZH and RU-DE are noisy due to the small amount of validation data available for these LPs.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 9, |
| "end": 16, |
| "text": "Table 1", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Sentence-level Models", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Several explainability methods can be used to extract highlights from a trained model in a post-hoc fashion. It is also possible to design a model that is explainable by construction, such as rationalizers (Lei et al., 2016; Bastings et al., 2019) . We investigate rationalizers, attention, gradient, and perturbation-based methods for this shared task.", |
| "cite_spans": [ |
| { |
| "start": 206, |
| "end": 224, |
| "text": "(Lei et al., 2016;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 225, |
| "end": 247, |
| "text": "Bastings et al., 2019)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Explainability Methods", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Attention-based methods. Since the backbone of our models consists of pre-trained multilingual transformers, we studied their main componentthe multi-head attention mechanism-expecting to find interpretability patterns that assign higher scores to words associated with translation errors. We extracted the following explanations from the multi-head attention mechanism:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Explainability Methods", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "\u2022 Attention weights: average the attention matrix A row-wise for all heads in all layers, amounting to a total of 24 \u00d7 16 = 384 and 32 \u00d7 18 = 576 explanation vectors a \u2208 R N +M for XLM-R and RemBERT-based models, respectively.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Explainability Methods", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "\u2022 Cross-attention weights: by manual inspection of attention weights, we noticed that some attention heads learn plausible connections from source-to-hypothesis and hypothesis-to-source. Therefore, instead of computing a row-wise average of the entire attention matrix, we average only cross-alignment rows. 5", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Explainability Methods", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "\u2022 Attention \u00d7 Norm: following the findings of Kobayashi et al. (2020) , we scale attention weights by the norm of value vectors", |
| "cite_spans": [ |
| { |
| "start": 46, |
| "end": 69, |
| "text": "Kobayashi et al. (2020)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Explainability Methods", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "V W V h 2 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Explainability Methods", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Gradient-based methods. Explanations extracted by storing gradients computed during the backward propagation is a standard tool used to interpret NLP models. For this shared task, we investigate the following gradient-based methods: 6", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Explainability Methods", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "\u2022 Gradient \u00d7 Hidden States: we compute gradients w.r.t. the hidden states of each layer, and multiply the resultant vectors by the hidden state vectors themselves:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Explainability Methods", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "\u2207 H i \u00d7 H i \u2208 R N +M , for 0 \u2264 i \u2264 L + 1.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Explainability Methods", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "\u2022 Gradient \u00d7 Attention: the same as before, but we use the output of the multi-head attention module instead of the hidden states.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Explainability Methods", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "\u2022 Integrated Gradients: we extract integrated gradient explanations w.r.t. the hidden states of each layer. We use a zero-vector as the baseline. We map gradients to explainability scores by normalizing them by their L2 norm and summing the hidden dimensions:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Explainability Methods", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "1 \u2207 H i / \u2207 H i 2 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Explainability Methods", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Perturbation-based methods. As baselines, we also extracted explanations using LIME (Ribeiro et al., 2016) and a leave-one-out strategy, where we replace the \"erased\" token by the <mask> token, which is used for the masked-language model training of XLM-R and RemBERT.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Explainability Methods", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Rationalizers. We append a differentiable binary mask layer (Bastings et al., 2019) on top of the XLM-R model in order to select which tokens are passed on for an estimator for the prediction of a sentence-level score. For each instance, we take the model representations from the scalar-mix layer and pass it to an encoder module, in which we sample a binary mask z \u2208 [0, 1] N +M from a relaxed Bernoulli distribution (Maddison et al., 2017; Jang et al., 2017) , and pass z [s; t] to an estimator module, which re-embeds the masked input and pass it to a linear output layer. Therefore, good explanations z will aid the estimator in producing good sentence-level scores. In training time, the parameters of the encoder and the estimator are jointly trained. In test time, we do not sample the binary masks. Instead, we use the relaxed Bernoulli distribution probabilities as explanations.", |
| "cite_spans": [ |
| { |
| "start": 60, |
| "end": 83, |
| "text": "(Bastings et al., 2019)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 419, |
| "end": 442, |
| "text": "(Maddison et al., 2017;", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 443, |
| "end": 461, |
| "text": "Jang et al., 2017)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Explainability Methods", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "In this track, we opted to use word-level annotation by incorporating a word-level loss to our previous models. To do this, we apply a map from word pieces to tokens after the scalar mix layer and pass the hidden vectors of each token through a feed-forward layer with a sigmoid activation to predict scores\u0177 i \u2208 [0, 1]. We weight the word-level loss by \u03bb and sum it with the sentencelevel loss. As baseline, we train a XLM-R Large model using OpenKiwi with the default hyperparameters. For all word-level models, we train with \u03bb \u2208 {10 3 , 10 4 , 10 5 } and save the checkpoint with the best performance on the validation set.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unconstrained Track", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Results. Table 2 shows the results of our wordlevel models on the validation set in terms of Matthews correlation coefficient (MCC) for each LP evaluated in the shared task. For completeness, we include the results for the 20 available sentences for DE-ZH and RU-DE.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 9, |
| "end": 16, |
| "text": "Table 2", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Unconstrained Track", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Although we can regard the extracted explanations as errors in the translation output, an analogous evaluation of word-level QE is not straightforward since the standard metrics require binary labels rather than continuous scores. Therefore, the explanations are evaluated against the ground-truth word-level labels in terms of the Area Under the Curve (AUC), Average Precision (AP), and Recall at Top-K (R@K) metrics only on the subset of translations that contain errors. Furthermore, since all of our models use subword tokenization, to get explanations for an entire word, we tried aggregating the scores of its word pieces by taking the sum, mean, or max, and we found that taking the sum performs better overall.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Results", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Attention heads are better alone. We found that some attention heads (mostly at upper layers) learned to focus on words associated with BAD tags, achieving great performance in terms of AUC and AP on the validation set. We show in Figure 2 the target AUC of different attention heads per layer as a heatmap for RO-EN, with darker colors indicating higher results. 7 We can see that attention heads in layers 18 and 19 perform better than other layers in general, and that some attention heads solely outperform the average of all attention heads for all respective layers. For example, the attention head 3 at layer 18 achieves an AUC score of 0.79, while the average of all attention heads from layer 18 gets an AUC score of 0.74 (5 points difference). The findings are similar for source AUC, with the exception that attention heads at lower layers also seem to achieve comparable, yet not better, results. This behavior was also noted by Fomicheva et al. (2021b) , with the difference that we analyzed attention heads independently rather than averaging them at each layer. Kobayashi et al. (2020) also arrive at similar findings but in terms of alignment error rate in a neural machine translation context.", |
| "cite_spans": [ |
| { |
| "start": 364, |
| "end": 365, |
| "text": "7", |
| "ref_id": null |
| }, |
| { |
| "start": 941, |
| "end": 965, |
| "text": "Fomicheva et al. (2021b)", |
| "ref_id": null |
| }, |
| { |
| "start": 1077, |
| "end": 1100, |
| "text": "Kobayashi et al. (2020)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 231, |
| "end": 239, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Constrained Track", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Attention \u00d7 Norm outperforms other explainers. By scaling attention probabilities by the L2 norm of value vectors, we improved the performance further. All of our best results consist of attention-based explainers, with the majority being the explanations that consider the norm of value vectors. We show the results of our best explainers on the validation set of RO-EN in Table 3 Overall, we observed that attention methods outperform gradient and perturbation methods by a considerable margin, and gradients w.r.t. attention outputs yield better results than gradients w.r.t. hidden states, indicating that the information stored in attention heads is valuable. In Figure 3 we show the attention map of two attention heads that perform well in terms of source AUC and target AUC on the validation set of RO-EN. We noted qualitatively that attention-heads that perform well on source AUC usually focus on cross-sentence tokens, 9 whereas attention-heads that have good results in terms of target AUC usually focus on hypothesis tokens. 9 Cross-sentence tokens are hypoehsis tokens attended by source tokens and also source tokens attended by hypoehsis tokens.", |
| "cite_spans": [ |
| { |
| "start": 1038, |
| "end": 1039, |
| "text": "9", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 374, |
| "end": 381, |
| "text": "Table 3", |
| "ref_id": "TABREF6" |
| }, |
| { |
| "start": 668, |
| "end": 676, |
| "text": "Figure 3", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Constrained Track", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Lastly, our strategy of appending a bottleneck layer acting as rationalizer did not work well, achieving worse results than perturbation-based methods. Results for all LPs. We show the results on the validation set for all LPs in Table 4 (left) with the best Attention \u00d7 Norm explanations for each tested encoder. We also report results of ensembled explanations, which are obtained by simply averaging selected Attention \u00d7 Norm explanations from models with different encoders. When comparing single encoders for in-domain LPs, we see that explanations from our XLM-R-based model achieved the best results for source and target metrics on RO-EN, with competitive results on ET-EN, for which explanations from a RemBERT-based model ranked first for source metrics. Despite being a simple strategy, we usually got \u223c2 more points of AUC, AP, and R@K by averaging attention explanations. We note that explanations from XLM-R-M and RemBERT perform well on the 20 sentences made available by the shared task for zero-shot LPs. Between XLM-R and XLM-R-M, explanations from the latter lead to better results for both DE-ZH and RU-DE, suggesting that the additional data from the Metrics shared task might help to improve the robustness for zeroshot LPs. Ensembling explanations also leads to higher performance for zero-shot LPs. However, we note that results for DE-ZH and RU-DE are noisy due to the small amount of validation data.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 230, |
| "end": 237, |
| "text": "Table 4", |
| "ref_id": "TABREF8" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Constrained Track", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "In this track, we used the predicted probabilities of BAD tags from supervised word-level QE models as explanation scores. The results are shown in Table 4 (right). As found in the constrained track, XLM-R and RemBERT-based models perform better for in-domain LPs, while XLM-R-M and Rem-BERT lead to better results for zero-shot LPs. Consistent with our findings in the constrained track, ensembling explanations also reflects in improvements in this track.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 148, |
| "end": 155, |
| "text": "Table 4", |
| "ref_id": "TABREF8" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Unconstrained Track", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "The official results of the shared task are shown in Table 5 for all LPs. Our final submissions consist of ensembled explanations since they proved to perform better for all LPs in both tracks. More specifically, we ensembled Attention \u00d7 Norm explainers from the models shown in Table 4 (left) for the constrained track; and we ensembled the pre-dicted probabilities of BAD tags from the models shown in Table 4 (right) for the unconstrained track. Overall, results for the unconstrained track are superior to those obtained in the constrained track. However, the opposite is true for DE-ZH, suggesting that extracting rationales from a sentence-level QE model is a promising weak-supervised strategy to identify translation errors.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 53, |
| "end": 60, |
| "text": "Table 5", |
| "ref_id": "TABREF10" |
| }, |
| { |
| "start": 279, |
| "end": 286, |
| "text": "Table 4", |
| "ref_id": "TABREF8" |
| }, |
| { |
| "start": 404, |
| "end": 411, |
| "text": "Table 4", |
| "ref_id": "TABREF8" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Official results", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Final remarks. We have shown that the multihead mechanism-the bedrock on which transformers are built-is able to learn the importance of tokens associated with BAD tags. Furthermore, composing explanations in the form of attention probabilities scaled by the norm of value vectors leads to further improvements (Kobayashi et al., 2020) . Ensembling these explanations yields the best results overall for all tested metrics on all LPs, including zero-shot ones.", |
| "cite_spans": [ |
| { |
| "start": 311, |
| "end": 335, |
| "text": "(Kobayashi et al., 2020)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "Future work. Transformers are composed of many parameters across a vast amount of heads and layers. Strategies that explore how explanations are formed as we move to upper layers are promising, such as computing attention flows and differentiable binary masks per layer (Abnar and Zuidema, 2020; De Cao et al., 2020) . Moreover, as shown in Figure 4 , we noticed that our best explainers suffer on sentences with higher quality, likely due to the low number of translation errors for those sentences. A simple way to circumvent this problem is to force the explainer to \"focus\" on words associated with lower scores (or to the BAD class in a classification setting). Thus, strategies such as framing the prediction of DA scores as a classification problem or inducing class-wise rationalizers ) can be helpful. This shared task focused only on the intersection between explainability and Quality Estimation, yet for future work we plan to apply explainability methods to recent MT metrics such as COMET (Rei et al., 2020a,b; and BLEURT (Sellam et al., 2020a,b) . ", |
| "cite_spans": [ |
| { |
| "start": 270, |
| "end": 295, |
| "text": "(Abnar and Zuidema, 2020;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 296, |
| "end": 316, |
| "text": "De Cao et al., 2020)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 1003, |
| "end": 1024, |
| "text": "(Rei et al., 2020a,b;", |
| "ref_id": null |
| }, |
| { |
| "start": 1036, |
| "end": 1060, |
| "text": "(Sellam et al., 2020a,b)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 341, |
| "end": 349, |
| "text": "Figure 4", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "Our code can be found at: https://github.com/ deep-spin/explainable_qe_shared_task/.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://huggingface.co/ xlm-roberta-large 3 https://huggingface.co/Unbabel/ xlm-roberta-wmt-metrics-da 4 https://huggingface.co/google/rembert", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Note that we can get cross-attentions from XLM-R and RemBERT by selecting only the words of the source that attend to the hypothesis and vice-versa.6 Our implementation is based on Captum: https:// captum.ai/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We got similar findings for ET-EN.8 Results for ET-EN follow the same trend (see \u00a7C).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work was supported by the P2020 programs MAIA (contract 045909) and Unbabel4EU (contract 042671), by the European Research Council (ERC StG DeepSPIN 758969), and by the Funda\u00e7\u00e3o para a Ci\u00eancia e Tecnologia through contract UIDB/50008/2020.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| }, |
| { |
| "text": "Our infrastructure consists of 5 machines with the specifications shown in Table 6 . The machines were used interchangeably, and all experiments were executed in a single GPU. Despite having machines with different specifications, we did not observe large differences in the execution time of our models across distinct machines. # GPU CPU 1 4 \u00d7 Titan Xp -12GB 16 \u00d7 AMD Ryzen 1950X @ 3.40GHz -128GB 2 4 \u00d7 GTX 1080 Ti -12GB 8 \u00d7 Intel i7-9800X @ 3.80GHz -128GB 3 3 \u00d7 RTX 2080 Ti -12GB 12 \u00d7 AMD Ryzen 2920X @ 3.50GHz -128GB 4 3 \u00d7 RTX 2080 Ti -12GB 12 \u00d7 AMD Ryzen 2920X @ 3.50GHz -128GB 5.1 4 \u00d7 Quadro RTX 6000 -24GB 12 \u00d7 Intel Xeon Silver 4214 @ 2.20GHz -256GB 5.2 4 \u00d7 RTX 2080 Ti -12GB12 \u00d7 Intel Xeon Silver 4214 @ 2.20GHz -256GB Table 6 : Computing infrastructure.", |
| "cite_spans": [ |
| { |
| "start": 330, |
| "end": 331, |
| "text": "#", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 75, |
| "end": 82, |
| "text": "Table 6", |
| "ref_id": null |
| }, |
| { |
| "start": 728, |
| "end": 735, |
| "text": "Table 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "A Computing infrastructure", |
| "sec_num": null |
| }, |
| { |
| "text": "The hyperparameters used for training are shown in Table 7 . C Full results for the constrained trackFollowing the analysis described in \u00a75.1, we report the best results for each explainability method for XLM-R-based models in Table 8 on the validation set of RO-EN and Table 9 on the validation set of ET-EN. We also report the best explainers based on Attention \u00d7 Norm for XLM-R-M and RemBERT-based models. For explainability methods based on attention weights, we show two attention heads: one with the best performance on source AUC and another with the best performance on target AUC. Besides submitting ensembled explanations, we also made submissions with Attention \u00d7 Norm heads that achieve the top performance on the validation set of RO-EN and ET-EN. ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 51, |
| "end": 58, |
| "text": "Table 7", |
| "ref_id": null |
| }, |
| { |
| "start": 227, |
| "end": 234, |
| "text": "Table 8", |
| "ref_id": null |
| }, |
| { |
| "start": 270, |
| "end": 277, |
| "text": "Table 9", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "B Training hyperparameters", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Quantifying attention flow in transformers", |
| "authors": [ |
| { |
| "first": "Samira", |
| "middle": [], |
| "last": "Abnar", |
| "suffix": "" |
| }, |
| { |
| "first": "Willem", |
| "middle": [], |
| "last": "Zuidema", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "4190--4197", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.385" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Samira Abnar and Willem Zuidema. 2020. Quantify- ing attention flow in transformers. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 4190-4197, On- line. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Explaining predictions of non-linear classifiers in NLP", |
| "authors": [ |
| { |
| "first": "Leila", |
| "middle": [], |
| "last": "Arras", |
| "suffix": "" |
| }, |
| { |
| "first": "Franziska", |
| "middle": [], |
| "last": "Horn", |
| "suffix": "" |
| }, |
| { |
| "first": "Gr\u00e9goire", |
| "middle": [], |
| "last": "Montavon", |
| "suffix": "" |
| }, |
| { |
| "first": "Klaus-Robert", |
| "middle": [], |
| "last": "M\u00fcller", |
| "suffix": "" |
| }, |
| { |
| "first": "Wojciech", |
| "middle": [], |
| "last": "Samek", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 1st Workshop on Representation Learning for NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "1--7", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W16-1601" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Leila Arras, Franziska Horn, Gr\u00e9goire Montavon, Klaus-Robert M\u00fcller, and Wojciech Samek. 2016. Explaining predictions of non-linear classifiers in NLP. In Proceedings of the 1st Workshop on Repre- sentation Learning for NLP, pages 1-7, Berlin, Ger- many. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Interpretable neural predictions with differentiable binary variables", |
| "authors": [ |
| { |
| "first": "Jasmijn", |
| "middle": [], |
| "last": "Bastings", |
| "suffix": "" |
| }, |
| { |
| "first": "Wilker", |
| "middle": [], |
| "last": "Aziz", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Titov", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "2963--2977", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P19-1284" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jasmijn Bastings, Wilker Aziz, and Ivan Titov. 2019. Interpretable neural predictions with differentiable binary variables. In Proceedings of the 57th Annual Meeting of the Association for Computational Lin- guistics, pages 2963-2977, Florence, Italy. Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "The elephant in the interpretability room: Why use attention as explanation when we have saliency methods?", |
| "authors": [ |
| { |
| "first": "Jasmijn", |
| "middle": [], |
| "last": "Bastings", |
| "suffix": "" |
| }, |
| { |
| "first": "Katja", |
| "middle": [], |
| "last": "Filippova", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Third BlackboxNLP Workshop on Analyzing and Interpreting Neural Networks for NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "149--155", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.blackboxnlp-1.14" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jasmijn Bastings and Katja Filippova. 2020. The ele- phant in the interpretability room: Why use atten- tion as explanation when we have saliency methods? In Proceedings of the Third BlackboxNLP Workshop on Analyzing and Interpreting Neural Networks for NLP, pages 149-155, Online. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Confidence estimation for machine translation", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Blatz", |
| "suffix": "" |
| }, |
| { |
| "first": "Erin", |
| "middle": [], |
| "last": "Fitzgerald", |
| "suffix": "" |
| }, |
| { |
| "first": "George", |
| "middle": [], |
| "last": "Foster", |
| "suffix": "" |
| }, |
| { |
| "first": "Simona", |
| "middle": [], |
| "last": "Gandrabur", |
| "suffix": "" |
| }, |
| { |
| "first": "Cyril", |
| "middle": [], |
| "last": "Goutte", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Kulesza", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "COLING 2004: Proceedings of the 20th International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "315--321", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John Blatz, Erin Fitzgerald, George Foster, Simona Gandrabur, Cyril Goutte, Alex Kulesza, Alberto San- chis, and Nicola Ueffing. 2004. Confidence esti- mation for machine translation. In COLING 2004: Proceedings of the 20th International Conference on Computational Linguistics, pages 315-321, Geneva, Switzerland. COLING.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "A Game Theoretic Approach to Class-wise Selective Rationalization", |
| "authors": [ |
| { |
| "first": "Shiyu", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Mo", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Tommi", |
| "middle": [], |
| "last": "Jaakkola", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "32", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shiyu Chang, Yang Zhang, Mo Yu, and Tommi Jaakkola. 2019. A Game Theoretic Approach to Class-wise Selective Rationalization. In Ad- vances in Neural Information Processing Systems, volume 32. Curran Associates, Inc.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "2020. Invariant rationalization", |
| "authors": [ |
| { |
| "first": "Shiyu", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Mo", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Tommi", |
| "middle": [], |
| "last": "Jaakkola", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "International Conference on Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "1448--1458", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shiyu Chang, Yang Zhang, Mo Yu, and Tommi Jaakkola. 2020. Invariant rationalization. In Inter- national Conference on Machine Learning, pages 1448-1458. PMLR.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Rethinking Embedding Coupling in Pre-trained Language Models", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Hyung Won", |
| "suffix": "" |
| }, |
| { |
| "first": "Thibault", |
| "middle": [], |
| "last": "Chung", |
| "suffix": "" |
| }, |
| { |
| "first": "Henry", |
| "middle": [], |
| "last": "Fevry", |
| "suffix": "" |
| }, |
| { |
| "first": "Melvin", |
| "middle": [], |
| "last": "Tsai", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ruder", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hyung Won Chung, Thibault Fevry, Henry Tsai, Melvin Johnson, and Sebastian Ruder. 2021. Re- thinking Embedding Coupling in Pre-trained Lan- guage Models. In International Conference on Learning Representations.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Unsupervised cross-lingual representation learning at scale", |
| "authors": [ |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Kartikay", |
| "middle": [], |
| "last": "Khandelwal", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Vishrav", |
| "middle": [], |
| "last": "Chaudhary", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Wenzek", |
| "suffix": "" |
| }, |
| { |
| "first": "Francisco", |
| "middle": [], |
| "last": "Guzm\u00e1n", |
| "suffix": "" |
| }, |
| { |
| "first": "Edouard", |
| "middle": [], |
| "last": "Grave", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "8440--8451", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.747" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzm\u00e1n, Edouard Grave, Myle Ott, Luke Zettle- moyer, and Veselin Stoyanov. 2020. Unsupervised cross-lingual representation learning at scale. In Proceedings of the 58th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 8440- 8451, Online. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "How do decisions emerge across layers in neural models? interpretation with differentiable masking", |
| "authors": [ |
| { |
| "first": "Nicola", |
| "middle": [], |
| "last": "De Cao", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [ |
| "Sejr" |
| ], |
| "last": "Schlichtkrull", |
| "suffix": "" |
| }, |
| { |
| "first": "Wilker", |
| "middle": [], |
| "last": "Aziz", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Titov", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "3243--3255", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.emnlp-main.262" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nicola De Cao, Michael Sejr Schlichtkrull, Wilker Aziz, and Ivan Titov. 2020. How do decisions emerge across layers in neural models? interpreta- tion with differentiable masking. In Proceedings of the 2020 Conference on Empirical Methods in Nat- ural Language Processing (EMNLP), pages 3243- 3255, Online. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1423" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "The Eval4NLP Shared Task on Explainable Quality Estimation: Overview and Results", |
| "authors": [ |
| { |
| "first": "Marina", |
| "middle": [], |
| "last": "Fomicheva", |
| "suffix": "" |
| }, |
| { |
| "first": "Piyawat", |
| "middle": [], |
| "last": "Lertvittayakumjorn", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Steffen", |
| "middle": [], |
| "last": "Eger", |
| "suffix": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the 2nd Workshop on Evaluation and Comparison of NLP Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marina Fomicheva, Piyawat Lertvittayakumjorn, Wei Zhao, Steffen Eger, and Yang Gao. 2021a. The Eval4NLP Shared Task on Explainable Quality Es- timation: Overview and Results. In Proceedings of the 2nd Workshop on Evaluation and Comparison of NLP Systems.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "and Nikolaos Aletras. 2021b. Translation Error Detection as Rationale Extraction", |
| "authors": [ |
| { |
| "first": "Marina", |
| "middle": [], |
| "last": "Fomicheva", |
| "suffix": "" |
| }, |
| { |
| "first": "Lucia", |
| "middle": [], |
| "last": "Specia", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2108.12197" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marina Fomicheva, Lucia Specia, and Nikolaos Aletras. 2021b. Translation Error Detection as Rationale Ex- traction. arXiv preprint arXiv:2108.12197.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "MLQE-PE: A Multilingual Quality Estimation and Post-Editing Dataset", |
| "authors": [ |
| { |
| "first": "Marina", |
| "middle": [], |
| "last": "Fomicheva", |
| "suffix": "" |
| }, |
| { |
| "first": "Shuo", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Erick", |
| "middle": [], |
| "last": "Fonseca", |
| "suffix": "" |
| }, |
| { |
| "first": "Fr\u00e9d\u00e9ric", |
| "middle": [], |
| "last": "Blain", |
| "suffix": "" |
| }, |
| { |
| "first": "Vishrav", |
| "middle": [], |
| "last": "Chaudhary", |
| "suffix": "" |
| }, |
| { |
| "first": "Francisco", |
| "middle": [], |
| "last": "Guzm\u00e1n", |
| "suffix": "" |
| }, |
| { |
| "first": "Nina", |
| "middle": [], |
| "last": "Lopatina", |
| "suffix": "" |
| }, |
| { |
| "first": "Lucia", |
| "middle": [], |
| "last": "Specia", |
| "suffix": "" |
| }, |
| { |
| "first": "Andr\u00e9", |
| "middle": [ |
| "F T" |
| ], |
| "last": "Martins", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2010.04480" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marina Fomicheva, Shuo Sun, Erick Fonseca, Fr\u00e9d\u00e9ric Blain, Vishrav Chaudhary, Francisco Guzm\u00e1n, Nina Lopatina, Lucia Specia, and Andr\u00e9 F. T. Martins. 2020. MLQE-PE: A Multilingual Quality Esti- mation and Post-Editing Dataset. arXiv preprint arXiv:2010.04480.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Findings of the WMT 2019 shared tasks on quality estimation", |
| "authors": [ |
| { |
| "first": "Erick", |
| "middle": [], |
| "last": "Fonseca", |
| "suffix": "" |
| }, |
| { |
| "first": "Lisa", |
| "middle": [], |
| "last": "Yankovskaya", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [ |
| "T" |
| ], |
| "last": "Andr\u00e9", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Martins", |
| "suffix": "" |
| }, |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Fishel", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Federmann", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Fourth Conference on Machine Translation", |
| "volume": "3", |
| "issue": "", |
| "pages": "1--10", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W19-5401" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Erick Fonseca, Lisa Yankovskaya, Andr\u00e9 F. T. Martins, Mark Fishel, and Christian Federmann. 2019. Find- ings of the WMT 2019 shared tasks on quality esti- mation. In Proceedings of the Fourth Conference on Machine Translation (Volume 3: Shared Task Papers, Day 2), pages 1-10, Florence, Italy. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Uncertainty-Aware Machine Translation Evaluation", |
| "authors": [ |
| { |
| "first": "Taisiya", |
| "middle": [], |
| "last": "Glushkova", |
| "suffix": "" |
| }, |
| { |
| "first": "Chrysoula", |
| "middle": [], |
| "last": "Zerva", |
| "suffix": "" |
| }, |
| { |
| "first": "Ricardo", |
| "middle": [], |
| "last": "Rei", |
| "suffix": "" |
| }, |
| { |
| "first": "Andr\u00e9", |
| "middle": [ |
| "F T" |
| ], |
| "last": "Martins", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Findings of the Association for Computational Linguistics: EMNLP 2021, Online. Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Taisiya Glushkova, Chrysoula Zerva, Ricardo Rei, and Andr\u00e9 F. T. Martins. 2021. Uncertainty-Aware Ma- chine Translation Evaluation. In Findings of the As- sociation for Computational Linguistics: EMNLP 2021, Online. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Continuous measurement scales in human evaluation of machine translation", |
| "authors": [ |
| { |
| "first": "Yvette", |
| "middle": [], |
| "last": "Graham", |
| "suffix": "" |
| }, |
| { |
| "first": "Timothy", |
| "middle": [], |
| "last": "Baldwin", |
| "suffix": "" |
| }, |
| { |
| "first": "Alistair", |
| "middle": [], |
| "last": "Moffat", |
| "suffix": "" |
| }, |
| { |
| "first": "Justin", |
| "middle": [], |
| "last": "Zobel", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 7th Linguistic Annotation Workshop and Interoperability with Discourse", |
| "volume": "", |
| "issue": "", |
| "pages": "33--41", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yvette Graham, Timothy Baldwin, Alistair Moffat, and Justin Zobel. 2013. Continuous measurement scales in human evaluation of machine translation. In Pro- ceedings of the 7th Linguistic Annotation Workshop and Interoperability with Discourse, pages 33-41, Sofia, Bulgaria. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "SPECTRA: Sparse Structured Text Rationalization", |
| "authors": [ |
| { |
| "first": "Nuno", |
| "middle": [ |
| "Miguel" |
| ], |
| "last": "Guerreiro", |
| "suffix": "" |
| }, |
| { |
| "first": "Andr\u00e9", |
| "middle": [ |
| "F T" |
| ], |
| "last": "Martins", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nuno Miguel Guerreiro and Andr\u00e9 F. T. Martins. 2021. SPECTRA: Sparse Structured Text Rationalization. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing (EMNLP), Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Self-Attention Attribution: Interpreting Information Interactions Inside Transformer", |
| "authors": [ |
| { |
| "first": "Yaru", |
| "middle": [], |
| "last": "Hao", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Dong", |
| "suffix": "" |
| }, |
| { |
| "first": "Furu", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Ke", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
| "volume": "35", |
| "issue": "", |
| "pages": "12963--12971", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yaru Hao, Li Dong, Furu Wei, and Ke Xu. 2021. Self-Attention Attribution: Interpreting Informa- tion Interactions Inside Transformer. Proceedings of the AAAI Conference on Artificial Intelligence, 35(14):12963-12971.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Parameter-efficient transfer learning for NLP", |
| "authors": [ |
| { |
| "first": "Neil", |
| "middle": [], |
| "last": "Houlsby", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrei", |
| "middle": [], |
| "last": "Giurgiu", |
| "suffix": "" |
| }, |
| { |
| "first": "Stanislaw", |
| "middle": [], |
| "last": "Jastrzebski", |
| "suffix": "" |
| }, |
| { |
| "first": "Bruna", |
| "middle": [], |
| "last": "Morrone", |
| "suffix": "" |
| }, |
| { |
| "first": "Quentin", |
| "middle": [], |
| "last": "De Laroussilhe", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Gesmundo", |
| "suffix": "" |
| }, |
| { |
| "first": "Mona", |
| "middle": [], |
| "last": "Attariyan", |
| "suffix": "" |
| }, |
| { |
| "first": "Sylvain", |
| "middle": [], |
| "last": "Gelly", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "International Conference on Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "2790--2799", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Neil Houlsby, Andrei Giurgiu, Stanislaw Jastrzebski, Bruna Morrone, Quentin De Laroussilhe, Andrea Gesmundo, Mona Attariyan, and Sylvain Gelly. 2019. Parameter-efficient transfer learning for NLP. In International Conference on Machine Learning, pages 2790-2799. PMLR.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Attention is not Explanation", |
| "authors": [ |
| { |
| "first": "Sarthak", |
| "middle": [], |
| "last": "Jain", |
| "suffix": "" |
| }, |
| { |
| "first": "Byron", |
| "middle": [ |
| "C" |
| ], |
| "last": "Wallace", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "3543--3556", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1357" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sarthak Jain and Byron C. Wallace. 2019. Attention is not Explanation. In Proceedings of the 2019 Con- ference of the North American Chapter of the Asso- ciation for Computational Linguistics: Human Lan- guage Technologies, Volume 1 (Long and Short Pa- pers), pages 3543-3556, Minneapolis, Minnesota. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Categorical Reparameterization with Gumbel-Softmax", |
| "authors": [ |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Jang", |
| "suffix": "" |
| }, |
| { |
| "first": "Shixiang", |
| "middle": [], |
| "last": "Gu", |
| "suffix": "" |
| }, |
| { |
| "first": "Ben", |
| "middle": [], |
| "last": "Poole", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eric Jang, Shixiang Gu, and Ben Poole. 2017. Categor- ical Reparameterization with Gumbel-Softmax.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Unbabel's participation in the WMT19 translation quality estimation shared task", |
| "authors": [ |
| { |
| "first": "Fabio", |
| "middle": [], |
| "last": "Kepler", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonay", |
| "middle": [], |
| "last": "Tr\u00e9nous", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcos", |
| "middle": [], |
| "last": "Treviso", |
| "suffix": "" |
| }, |
| { |
| "first": "Miguel", |
| "middle": [], |
| "last": "Vera", |
| "suffix": "" |
| }, |
| { |
| "first": "Ant\u00f3nio", |
| "middle": [], |
| "last": "G\u00f3is", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Amin Farajian", |
| "suffix": "" |
| }, |
| { |
| "first": "Ant\u00f3nio", |
| "middle": [ |
| "V" |
| ], |
| "last": "Lopes", |
| "suffix": "" |
| }, |
| { |
| "first": "Andr\u00e9", |
| "middle": [ |
| "F T" |
| ], |
| "last": "Martins", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Fourth Conference on Machine Translation", |
| "volume": "3", |
| "issue": "", |
| "pages": "78--84", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W19-5406" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fabio Kepler, Jonay Tr\u00e9nous, Marcos Treviso, Miguel Vera, Ant\u00f3nio G\u00f3is, M. Amin Farajian, Ant\u00f3nio V. Lopes, and Andr\u00e9 F. T. Martins. 2019a. Unba- bel's participation in the WMT19 translation qual- ity estimation shared task. In Proceedings of the Fourth Conference on Machine Translation (Volume 3: Shared Task Papers, Day 2), pages 78-84, Flo- rence, Italy. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "OpenKiwi: An open source framework for quality estimation", |
| "authors": [ |
| { |
| "first": "Fabio", |
| "middle": [], |
| "last": "Kepler", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonay", |
| "middle": [], |
| "last": "Tr\u00e9nous", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcos", |
| "middle": [], |
| "last": "Treviso", |
| "suffix": "" |
| }, |
| { |
| "first": "Miguel", |
| "middle": [], |
| "last": "Vera", |
| "suffix": "" |
| }, |
| { |
| "first": "Andr\u00e9", |
| "middle": [ |
| "F T" |
| ], |
| "last": "Martins", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics: System Demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "117--122", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P19-3020" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fabio Kepler, Jonay Tr\u00e9nous, Marcos Treviso, Miguel Vera, and Andr\u00e9 F. T. Martins. 2019b. OpenKiwi: An open source framework for quality estimation. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics: System Demonstrations, pages 117-122, Florence, Italy. As- sociation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Interpretation of NLP models through input marginalization", |
| "authors": [ |
| { |
| "first": "Siwon", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Jihun", |
| "middle": [], |
| "last": "Yi", |
| "suffix": "" |
| }, |
| { |
| "first": "Eunji", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Sungroh", |
| "middle": [], |
| "last": "Yoon", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "3154--3167", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.emnlp-main.255" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Siwon Kim, Jihun Yi, Eunji Kim, and Sungroh Yoon. 2020. Interpretation of NLP models through input marginalization. In Proceedings of the 2020 Con- ference on Empirical Methods in Natural Language Processing (EMNLP), pages 3154-3167, Online. As- sociation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Attention is not only a weight: Analyzing transformers with vector norms", |
| "authors": [ |
| { |
| "first": "Goro", |
| "middle": [], |
| "last": "Kobayashi", |
| "suffix": "" |
| }, |
| { |
| "first": "Tatsuki", |
| "middle": [], |
| "last": "Kuribayashi", |
| "suffix": "" |
| }, |
| { |
| "first": "Sho", |
| "middle": [], |
| "last": "Yokoi", |
| "suffix": "" |
| }, |
| { |
| "first": "Kentaro", |
| "middle": [], |
| "last": "Inui", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "7057--7075", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.emnlp-main.574" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Goro Kobayashi, Tatsuki Kuribayashi, Sho Yokoi, and Kentaro Inui. 2020. Attention is not only a weight: Analyzing transformers with vector norms. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 7057-7075, Online. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Rationalizing neural predictions", |
| "authors": [ |
| { |
| "first": "Tao", |
| "middle": [], |
| "last": "Lei", |
| "suffix": "" |
| }, |
| { |
| "first": "Regina", |
| "middle": [], |
| "last": "Barzilay", |
| "suffix": "" |
| }, |
| { |
| "first": "Tommi", |
| "middle": [], |
| "last": "Jaakkola", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "107--117", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D16-1011" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tao Lei, Regina Barzilay, and Tommi Jaakkola. 2016. Rationalizing neural predictions. In Proceedings of the 2016 Conference on Empirical Methods in Nat- ural Language Processing, pages 107-117, Austin, Texas. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "The Concrete Distribution: A Continuous Relaxation of Discrete Random Variables", |
| "authors": [ |
| { |
| "first": "Chris", |
| "middle": [ |
| "J" |
| ], |
| "last": "Maddison", |
| "suffix": "" |
| }, |
| { |
| "first": "Andriy", |
| "middle": [], |
| "last": "Mnih", |
| "suffix": "" |
| }, |
| { |
| "first": "Yee Whye", |
| "middle": [], |
| "last": "Teh", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chris J. Maddison, Andriy Mnih, and Yee Whye Teh. 2017. The Concrete Distribution: A Continuous Re- laxation of Discrete Random Variables.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Fluency over adequacy: A pilot study in measuring user trust in imperfect MT", |
| "authors": [ |
| { |
| "first": "Marianna", |
| "middle": [], |
| "last": "Martindale", |
| "suffix": "" |
| }, |
| { |
| "first": "Marine", |
| "middle": [], |
| "last": "Carpuat", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 13th Conference of the Association for Machine Translation in the Americas", |
| "volume": "1", |
| "issue": "", |
| "pages": "13--25", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marianna Martindale and Marine Carpuat. 2018. Flu- ency over adequacy: A pilot study in measuring user trust in imperfect MT. In Proceedings of the 13th Conference of the Association for Machine Transla- tion in the Americas (Volume 1: Research Track), pages 13-25, Boston, MA. Association for Machine Translation in the Americas.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Deep contextualized word representations", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [ |
| "E" |
| ], |
| "last": "Peters", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Neumann", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Iyyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Gardner", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "2227--2237", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N18-1202" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew E. Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word rep- resentations. In Proceedings of the 2018 Confer- ence of the North American Chapter of the Associ- ation for Computational Linguistics: Human Lan- guage Technologies, Volume 1 (Long Papers), pages 2227-2237, New Orleans, Louisiana. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "AdapterHub: A framework for adapting transformers", |
| "authors": [ |
| { |
| "first": "Jonas", |
| "middle": [], |
| "last": "Pfeiffer", |
| "suffix": "" |
| }, |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "R\u00fcckl\u00e9", |
| "suffix": "" |
| }, |
| { |
| "first": "Clifton", |
| "middle": [], |
| "last": "Poth", |
| "suffix": "" |
| }, |
| { |
| "first": "Aishwarya", |
| "middle": [], |
| "last": "Kamath", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Ruder", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Iryna", |
| "middle": [], |
| "last": "Gurevych", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "46--54", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.emnlp-demos.7" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jonas Pfeiffer, Andreas R\u00fcckl\u00e9, Clifton Poth, Aish- warya Kamath, Ivan Vuli\u0107, Sebastian Ruder, Kyunghyun Cho, and Iryna Gurevych. 2020. AdapterHub: A framework for adapting transform- ers. In Proceedings of the 2020 Conference on Em- pirical Methods in Natural Language Processing: System Demonstrations, pages 46-54, Online. Asso- ciation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "TransQuest at WMT2020: Sentence-level direct assessment", |
| "authors": [ |
| { |
| "first": "Tharindu", |
| "middle": [], |
| "last": "Ranasinghe", |
| "suffix": "" |
| }, |
| { |
| "first": "Constantin", |
| "middle": [], |
| "last": "Orasan", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruslan", |
| "middle": [], |
| "last": "Mitkov", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Fifth Conference on Machine Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "1049--1055", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tharindu Ranasinghe, Constantin Orasan, and Rus- lan Mitkov. 2020a. TransQuest at WMT2020: Sentence-level direct assessment. In Proceedings of the Fifth Conference on Machine Translation, pages 1049-1055, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "TransQuest: Translation quality estimation with cross-lingual transformers", |
| "authors": [ |
| { |
| "first": "Tharindu", |
| "middle": [], |
| "last": "Ranasinghe", |
| "suffix": "" |
| }, |
| { |
| "first": "Constantin", |
| "middle": [], |
| "last": "Orasan", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruslan", |
| "middle": [], |
| "last": "Mitkov", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 28th International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "5070--5081", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.coling-main.445" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tharindu Ranasinghe, Constantin Orasan, and Ruslan Mitkov. 2020b. TransQuest: Translation quality esti- mation with cross-lingual transformers. In Proceed- ings of the 28th International Conference on Com- putational Linguistics, pages 5070-5081, Barcelona, Spain (Online). International Committee on Compu- tational Linguistics.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "COMET: A neural framework for MT evaluation", |
| "authors": [ |
| { |
| "first": "Ricardo", |
| "middle": [], |
| "last": "Rei", |
| "suffix": "" |
| }, |
| { |
| "first": "Craig", |
| "middle": [], |
| "last": "Stewart", |
| "suffix": "" |
| }, |
| { |
| "first": "Ana", |
| "middle": [ |
| "C" |
| ], |
| "last": "Farinha", |
| "suffix": "" |
| }, |
| { |
| "first": "Alon", |
| "middle": [], |
| "last": "Lavie", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "2685--2702", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.emnlp-main.213" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ricardo Rei, Craig Stewart, Ana C Farinha, and Alon Lavie. 2020a. COMET: A neural framework for MT evaluation. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Process- ing (EMNLP), pages 2685-2702, Online. Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Unbabel's participation in the WMT20 metrics shared task", |
| "authors": [ |
| { |
| "first": "Ricardo", |
| "middle": [], |
| "last": "Rei", |
| "suffix": "" |
| }, |
| { |
| "first": "Craig", |
| "middle": [], |
| "last": "Stewart", |
| "suffix": "" |
| }, |
| { |
| "first": "Ana", |
| "middle": [ |
| "C" |
| ], |
| "last": "Farinha", |
| "suffix": "" |
| }, |
| { |
| "first": "Alon", |
| "middle": [], |
| "last": "Lavie", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Fifth Conference on Machine Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "911--920", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ricardo Rei, Craig Stewart, Ana C Farinha, and Alon Lavie. 2020b. Unbabel's participation in the WMT20 metrics shared task. In Proceedings of the Fifth Conference on Machine Translation, pages 911-920, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Why should i trust you?: Explaining the predictions of any classifier", |
| "authors": [ |
| { |
| "first": "Marco", |
| "middle": [ |
| "Tulio" |
| ], |
| "last": "Ribeiro", |
| "suffix": "" |
| }, |
| { |
| "first": "Sameer", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "Carlos", |
| "middle": [], |
| "last": "Guestrin", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proc. ACM SIGKDD", |
| "volume": "", |
| "issue": "", |
| "pages": "1135--1144", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marco Tulio Ribeiro, Sameer Singh, and Carlos Guestrin. 2016. Why should i trust you?: Explain- ing the predictions of any classifier. In Proc. ACM SIGKDD, pages 1135-1144. ACM.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "BLEURT: Learning robust metrics for text generation", |
| "authors": [ |
| { |
| "first": "Thibault", |
| "middle": [], |
| "last": "Sellam", |
| "suffix": "" |
| }, |
| { |
| "first": "Dipanjan", |
| "middle": [], |
| "last": "Das", |
| "suffix": "" |
| }, |
| { |
| "first": "Ankur", |
| "middle": [], |
| "last": "Parikh", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "7881--7892", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.704" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thibault Sellam, Dipanjan Das, and Ankur Parikh. 2020a. BLEURT: Learning robust metrics for text generation. In Proceedings of the 58th Annual Meet- ing of the Association for Computational Linguistics, pages 7881-7892, Online. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Learning to evaluate translation beyond English: BLEURT submissions to the WMT metrics 2020 shared task", |
| "authors": [ |
| { |
| "first": "Thibault", |
| "middle": [], |
| "last": "Sellam", |
| "suffix": "" |
| }, |
| { |
| "first": "Amy", |
| "middle": [], |
| "last": "Pu", |
| "suffix": "" |
| }, |
| { |
| "first": "Hyung Won", |
| "middle": [], |
| "last": "Chung", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Gehrmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Qijun", |
| "middle": [], |
| "last": "Tan", |
| "suffix": "" |
| }, |
| { |
| "first": "Markus", |
| "middle": [], |
| "last": "Freitag", |
| "suffix": "" |
| }, |
| { |
| "first": "Dipanjan", |
| "middle": [], |
| "last": "Das", |
| "suffix": "" |
| }, |
| { |
| "first": "Ankur", |
| "middle": [], |
| "last": "Parikh", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Fifth Conference on Machine Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "921--927", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thibault Sellam, Amy Pu, Hyung Won Chung, Sebas- tian Gehrmann, Qijun Tan, Markus Freitag, Dipan- jan Das, and Ankur Parikh. 2020b. Learning to eval- uate translation beyond English: BLEURT submis- sions to the WMT metrics 2020 shared task. In Pro- ceedings of the Fifth Conference on Machine Trans- lation, pages 921-927, Online. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "A study of translation edit rate with targeted human annotation", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Snover", |
| "suffix": "" |
| }, |
| { |
| "first": "Bonnie", |
| "middle": [], |
| "last": "Dorr", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Schwartz", |
| "suffix": "" |
| }, |
| { |
| "first": "Linnea", |
| "middle": [], |
| "last": "Micciulla", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Makhoul", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of Association for Machine Translation in the Americas", |
| "volume": "", |
| "issue": "", |
| "pages": "223--231", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew Snover, Bonnie Dorr, Richard Schwartz, Lin- nea Micciulla, and John Makhoul. 2006. A study of translation edit rate with targeted human annota- tion. In In Proceedings of Association for Machine Translation in the Americas, pages 223-231.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Findings of the WMT 2020 shared task on quality estimation", |
| "authors": [ |
| { |
| "first": "Lucia", |
| "middle": [], |
| "last": "Specia", |
| "suffix": "" |
| }, |
| { |
| "first": "Fr\u00e9d\u00e9ric", |
| "middle": [], |
| "last": "Blain", |
| "suffix": "" |
| }, |
| { |
| "first": "Marina", |
| "middle": [], |
| "last": "Fomicheva", |
| "suffix": "" |
| }, |
| { |
| "first": "Erick", |
| "middle": [], |
| "last": "Fonseca", |
| "suffix": "" |
| }, |
| { |
| "first": "Vishrav", |
| "middle": [], |
| "last": "Chaudhary", |
| "suffix": "" |
| }, |
| { |
| "first": "Francisco", |
| "middle": [], |
| "last": "Guzm\u00e1n", |
| "suffix": "" |
| }, |
| { |
| "first": "Andr\u00e9", |
| "middle": [ |
| "F T" |
| ], |
| "last": "Martins", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Fifth Conference on Machine Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "743--764", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lucia Specia, Fr\u00e9d\u00e9ric Blain, Marina Fomicheva, Er- ick Fonseca, Vishrav Chaudhary, Francisco Guzm\u00e1n, and Andr\u00e9 F. T. Martins. 2020. Findings of the WMT 2020 shared task on quality estimation. In Proceedings of the Fifth Conference on Machine Translation, pages 743-764, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Quality estimation for machine translation", |
| "authors": [ |
| { |
| "first": "Lucia", |
| "middle": [], |
| "last": "Specia", |
| "suffix": "" |
| }, |
| { |
| "first": "Carolina", |
| "middle": [], |
| "last": "Scarton", |
| "suffix": "" |
| }, |
| { |
| "first": "Gustavo", |
| "middle": [ |
| "Henrique" |
| ], |
| "last": "Paetzold", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Synthesis Lectures on Human Language Technologies", |
| "volume": "11", |
| "issue": "1", |
| "pages": "1--162", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lucia Specia, Carolina Scarton, and Gustavo Henrique Paetzold. 2018. Quality estimation for machine translation. Synthesis Lectures on Human Language Technologies, 11(1):1-162.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "Attention is All you Need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "\u0141ukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in Neural Information Processing Systems (NeurIPS)", |
| "volume": "30", |
| "issue": "", |
| "pages": "5998--6008", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is All you Need. In Advances in Neural Information Pro- cessing Systems (NeurIPS), volume 30, pages 5998- 6008. Curran Associates, Inc.", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "Analyzing the source and target contributions to predictions in neural machine translation", |
| "authors": [ |
| { |
| "first": "Elena", |
| "middle": [], |
| "last": "Voita", |
| "suffix": "" |
| }, |
| { |
| "first": "Rico", |
| "middle": [], |
| "last": "Sennrich", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Titov", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing", |
| "volume": "1", |
| "issue": "", |
| "pages": "1126--1140", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2021.acl-long.91" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Elena Voita, Rico Sennrich, and Ivan Titov. 2021. An- alyzing the source and target contributions to pre- dictions in neural machine translation. In Proceed- ings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th Interna- tional Joint Conference on Natural Language Pro- cessing (Volume 1: Long Papers), pages 1126-1140, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "Attention is not not explanation", |
| "authors": [ |
| { |
| "first": "Sarah", |
| "middle": [], |
| "last": "Wiegreffe", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuval", |
| "middle": [], |
| "last": "Pinter", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "11--20", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D19-1002" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sarah Wiegreffe and Yuval Pinter. 2019. Attention is not not explanation. In Proceedings of the 2019 Con- ference on Empirical Methods in Natural Language Processing and the 9th International Joint Confer- ence on Natural Language Processing (EMNLP- IJCNLP), pages 11-20, Hong Kong, China. Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "IST-Unbabel 2021 Submission for the Quality Estimation Shared Task", |
| "authors": [ |
| { |
| "first": "Chrysoula", |
| "middle": [], |
| "last": "Zerva", |
| "suffix": "" |
| }, |
| { |
| "first": "Ricardo", |
| "middle": [], |
| "last": "Daan Van Stigt", |
| "suffix": "" |
| }, |
| { |
| "first": "Ana", |
| "middle": [ |
| "C" |
| ], |
| "last": "Rei", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Farinha", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [ |
| "C" |
| ], |
| "last": "Jos\u00e9", |
| "suffix": "" |
| }, |
| { |
| "first": "Taisiya", |
| "middle": [], |
| "last": "De Souza", |
| "suffix": "" |
| }, |
| { |
| "first": "Miguel", |
| "middle": [], |
| "last": "Glushkova", |
| "suffix": "" |
| }, |
| { |
| "first": "Fabio", |
| "middle": [], |
| "last": "Vera", |
| "suffix": "" |
| }, |
| { |
| "first": "Andr\u00e9", |
| "middle": [], |
| "last": "Kepler", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Martins", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the Fifth Conference on Machine Translation, Online. Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chrysoula Zerva, Daan van Stigt, Ricardo Rei, Ana C Farinha, Jos\u00e9 G. C. de Souza, Taisiya Glushkova, Miguel Vera, Fabio Kepler, and Andr\u00e9 Martins. 2021. IST-Unbabel 2021 Submission for the Quality Estimation Shared Task. In Proceedings of the Fifth Conference on Machine Translation, Online. Associ- ation for Computational Linguistics.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "0.55 0.55 0.46 0.46 0.56 0.52 0.50 0.53 0.47 0.57 0.64 0.64 0.49 0.51 0.62 0.51 0.70 0.70 0.77 0.49 0.50 0.61 0.57 0.59 0.44 0.50 0.51 0.45 0.52 0.50 0.48 0.55 0.44 0.57 0.42 0.62 0.58 0.46 0.63 0.60 0.44 0.75 0.70 0.55 0.51 0.48 0.50", |
| "num": null, |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF1": { |
| "text": "Target AUC of different attention heads at each layer of our XLM-R model for RO-EN. The last tick on the y-axis represents the average of all attention heads.", |
| "num": null, |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF2": { |
| "text": "Example of two attention maps from particular heads that perform well on source AUC (left) and target AUC (right) for RO-EN.", |
| "num": null, |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF3": { |
| "text": "Predicted DAs vs. the average precision of our best explainer on the validation set of RO-EN.", |
| "num": null, |
| "type_str": "figure", |
| "uris": null |
| }, |
| "TABREF2": { |
| "text": "Pearson correlation of our sentence-level QE systems by varying the model used as the encoder layer.", |
| "content": "<table/>", |
| "html": null, |
| "num": null, |
| "type_str": "table" |
| }, |
| "TABREF4": { |
"text": "Source and target MCC results of our word-level QE systems by varying the model used as the encoder layer. The values of \u03bb for each model are: 10^3, 10^4, 10^4, 10^4.",
| "content": "<table/>", |
| "html": null, |
| "num": null, |
| "type_str": "table" |
| }, |
| "TABREF6": { |
| "text": "Constrained track results for different explainability methods on the validation set of RO-EN using XLM-R as encoder.", |
| "content": "<table/>", |
| "html": null, |
| "num": null, |
| "type_str": "table" |
| }, |
| "TABREF8": { |
| "text": "Constrained (left) and unconstrained (right) track results on the validation set for all LPs using the Attention \u00d7 Norm explainer.", |
| "content": "<table/>", |
| "html": null, |
| "num": null, |
| "type_str": "table" |
| }, |
| "TABREF10": { |
| "text": "Official test set results for constrained (top) and unconstrained (bottom) tracks.", |
| "content": "<table/>", |
| "html": null, |
| "num": null, |
| "type_str": "table" |
| } |
| } |
| } |
| } |