| { |
| "paper_id": "P13-1004", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T09:33:18.619606Z" |
| }, |
| "title": "Modelling Annotator Bias with Multi-task Gaussian Processes: An Application to Machine Translation Quality Estimation", |
| "authors": [ |
| { |
| "first": "Trevor", |
| "middle": [], |
| "last": "Cohn", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Sheffield Sheffield", |
| "location": { |
| "country": "United Kingdom" |
| } |
| }, |
| "email": "t.cohn@sheffield.ac.uk" |
| }, |
| { |
| "first": "Lucia", |
| "middle": [], |
| "last": "Specia", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Sheffield Sheffield", |
| "location": { |
| "country": "United Kingdom" |
| } |
| }, |
| "email": "l.specia@sheffield.ac.uk" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Annotating linguistic data is often a complex, time consuming and expensive endeavour. Even with strict annotation guidelines, human subjects often deviate in their analyses, each bringing different biases, interpretations of the task and levels of consistency. We present novel techniques for learning from the outputs of multiple annotators while accounting for annotator specific behaviour. These techniques use multi-task Gaussian Processes to learn jointly a series of annotator and metadata specific models, while explicitly representing correlations between models which can be learned directly from data. Our experiments on two machine translation quality estimation datasets show uniform significant accuracy gains from multi-task learning, and consistently outperform strong baselines.", |
| "pdf_parse": { |
| "paper_id": "P13-1004", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Annotating linguistic data is often a complex, time consuming and expensive endeavour. Even with strict annotation guidelines, human subjects often deviate in their analyses, each bringing different biases, interpretations of the task and levels of consistency. We present novel techniques for learning from the outputs of multiple annotators while accounting for annotator specific behaviour. These techniques use multi-task Gaussian Processes to learn jointly a series of annotator and metadata specific models, while explicitly representing correlations between models which can be learned directly from data. Our experiments on two machine translation quality estimation datasets show uniform significant accuracy gains from multi-task learning, and consistently outperform strong baselines.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Most empirical work in Natural Language Processing (NLP) is based on supervised machine learning techniques which rely on human annotated data of some form or another. The annotation process is often time consuming, expensive, and prone to errors; moreover there is often considerable disagreement amongst annotators.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In general, the predominant perspective to deal with these data annotation issues in previous work has been that there is a single underlying ground truth, and that the annotations collected are noisy and/or biased samples of this. The challenge is then one of quality control, in order to process the data by filtering, averaging or similar to distil the truth. We posit that this perspective is too limiting, especially with respect to linguistic data, where each individual's idiolect and linguistic background can give rise to many different -and yet equally valid -truths. Particularly in highly subjective annotation tasks, the differences between annotators cannot be captured by simple models such as scaling all instances of a certain annotator by a factor. They can originate from a number of nuanced aspects. This is the case, for example, of annotations on the quality of sentences generated using machine translation (MT) systems, which are often used to build quality estimation models (Blatz et al., 2004; Specia et al., 2009) -our application of interest.", |
| "cite_spans": [ |
| { |
| "start": 1000, |
| "end": 1020, |
| "text": "(Blatz et al., 2004;", |
| "ref_id": null |
| }, |
| { |
| "start": 1021, |
| "end": 1041, |
| "text": "Specia et al., 2009)", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In addition to annotators' own perceptions and expectations with respect to translation quality, a number of factors can affect their judgements on specific sentences. For example, certain annotators may prefer translations produced by rulebased systems as these tend to be more grammatical, while others would prefer sentences produced by statistical systems with more adequate lexical choices. Likewise, some annotators can be biased by the complexity of the source sentence: lengthy sentences are often (subconsciously) assumed to be of low quality by some annotators. An extreme case is the judgement of quality through post-editing time: annotators have different typing speeds, as well as levels of expertise in the task of post-editing, proficiency levels in the language pair, and knowledge of the terminology used in particular sentences. These variations result in time measurements that are not comparable across annotators. Thus far, the use of post-editing time has been done on an per-annotator basis (Specia, 2011) , or simply averaged across multiple translators (Plitt and Masselot, 2010) , both strategies far from ideal.", |
| "cite_spans": [ |
| { |
| "start": 1015, |
| "end": 1029, |
| "text": "(Specia, 2011)", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 1079, |
| "end": 1105, |
| "text": "(Plitt and Masselot, 2010)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Overall, these myriad of factors affecting quality judgements make the modelling of multiple annotators a very challenging problem. This problem is exacerbated when annotations are provided by non-professional annotators, e.g., through crowdsourcing -a common strategy used to make annotation cheaper and faster, however at the cost of less reliable outcomes.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Most related work on quality assurance for data annotation has been developed in the context of crowdsourcing. Common practices include filtering out annotators who substantially deviate from a gold-standard set or present unexpected behaviours (Raykar et al., 2010; Raykar and Yu, 2012) , or who disagree with others using, e.g., majority or consensus labelling (Snow et al., 2008; Sheng et al., 2008) . Another relevant strand of work aims to model legitimate, systematic biases in annotators (including both non-experts and experts), such as the fact that some annotators tend to be more negative than others, and that some annotators use a wider or narrower range of values (Flach et al., 2010; Ipeirotis et al., 2010) . However, with a few exceptions in Computer Vision (e.g., Whitehill et al. (2009) , Welinder et al. (2010) ), existing work disregard metadata and its impact on labelling.", |
| "cite_spans": [ |
| { |
| "start": 245, |
| "end": 266, |
| "text": "(Raykar et al., 2010;", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 267, |
| "end": 287, |
| "text": "Raykar and Yu, 2012)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 363, |
| "end": 382, |
| "text": "(Snow et al., 2008;", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 383, |
| "end": 402, |
| "text": "Sheng et al., 2008)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 671, |
| "end": 698, |
| "text": "values (Flach et al., 2010;", |
| "ref_id": null |
| }, |
| { |
| "start": 699, |
| "end": 722, |
| "text": "Ipeirotis et al., 2010)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 782, |
| "end": 805, |
| "text": "Whitehill et al. (2009)", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 808, |
| "end": 830, |
| "text": "Welinder et al. (2010)", |
| "ref_id": "BIBREF36" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper we model the task of predicting the quality of sentence translations using datasets that have been annotated by several judges with different levels of expertise and reliability, containing translations from a variety of MT systems and on a range of different types of sentences. We address this problem using multi-task learning in which we learn individual models for each context (the task, incorporating the annotator and other metadata: translation system and the source sentence) while also modelling correlations between tasks such that related tasks can mutually inform one another. Our use of multi-task learning allows the modelling of a diversity of truths, while also recognising that they are rarely independent of one another (annotators often agree) by explicitly accounting for inter-annotator correlations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Our approach is based on Gaussian Processes (GPs) (Rasmussen and Williams, 2006) , a kernelised Bayesian non-parametric learning framework. We develop multi-task learning models by representing intra-task transfer simply and explicitly as part of a parameterised kernel function. GPs are an extremely flexible probabilistic framework and have been successfully adapted for multi-task learning in a number of ways, e.g., by learning multi-task correlations (Bonilla et al., 2008) , modelling per-task variance (Groot et al., 2011) or perannotator biases (Rogers et al., 2010) . Our method builds on the work of Bonilla et al. (2008) by explicitly modelling intra-task transfer, which is learned automatically from the data, in order to robustly handle outlier tasks and task variances. We show in our experiments on two translation quality datasets that these multi-task learning strategies are far superior to training individual per-task models or a single pooled model, and moreover that our multi-task learning approach can achieve similar performance to these baselines using only a fraction of the training data.", |
| "cite_spans": [ |
| { |
| "start": 50, |
| "end": 80, |
| "text": "(Rasmussen and Williams, 2006)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 456, |
| "end": 478, |
| "text": "(Bonilla et al., 2008)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 509, |
| "end": 529, |
| "text": "(Groot et al., 2011)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 553, |
| "end": 574, |
| "text": "(Rogers et al., 2010)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 610, |
| "end": 631, |
| "text": "Bonilla et al. (2008)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In addition to showing empirical performance gains on quality estimation applications, an important contribution of this paper is in introducing Gaussian Processes to the NLP community, 1 a technique that has great potential to further performance in a wider range of NLP applications. Moreover, the algorithms proposed herein can be adapted to improve future annotation efforts, and subsequent use of noisy crowd-sourced data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Quality estimation (QE) for MT aims at providing an estimate on the quality of each translated segment -typically a sentence -without access to reference translations. Work in this area has become increasingly popular in recent years as a consequence of the widespread use of MT among realworld users such as professional translators. Examples of applications of QE include improving post-editing efficiency by filtering out low quality segments which would require more effort and time to correct than translating from scratch (Specia et al., 2009) , selecting high quality segments to be published as they are, without post-editing (Soricut and Echihabi, 2010) , selecting a translation from either an MT system or a translation memory for post-editing (He et al., 2010) , selecting the best translation from multiple MT systems (Specia et al., 2010) , and highlighting subsegments that need revision (Bach et al., 2011) .", |
| "cite_spans": [ |
| { |
| "start": 528, |
| "end": 549, |
| "text": "(Specia et al., 2009)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 634, |
| "end": 662, |
| "text": "(Soricut and Echihabi, 2010)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 755, |
| "end": 772, |
| "text": "(He et al., 2010)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 831, |
| "end": 852, |
| "text": "(Specia et al., 2010)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 903, |
| "end": 922, |
| "text": "(Bach et al., 2011)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Quality Estimation", |
| "sec_num": "2" |
| }, |
| { |
| "text": "QE is generally addressed as a machine learning task using a variety of linear and kernel-based regression or classification algorithms to induce models from examples of translations described through a number of features and annotated for quality. For an overview of various algorithms and features we refer the reader to the WMT12 shared task on QE (Callison-Burch et al., 2012) .", |
| "cite_spans": [ |
| { |
| "start": 351, |
| "end": 380, |
| "text": "(Callison-Burch et al., 2012)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Quality Estimation", |
| "sec_num": "2" |
| }, |
| { |
| "text": "While initial work used annotations derived from automatic MT evaluation metrics (Blatz et al., 2004 ) such as BLEU (Papineni et al., 2002) at training time, it soon became clear that human labels result in significantly better models (Quirk, 2004) . Current work at sentence level is thus based on some form of human supervision. As typical of subjective annotation tasks, QE datasets should contain multiple annotators to lead to models that are representative. Therefore, work in QE faces all common issues regarding variability in annotators' judgements. The following are a few other features that make our datasets particularly interesting:", |
| "cite_spans": [ |
| { |
| "start": 81, |
| "end": 100, |
| "text": "(Blatz et al., 2004", |
| "ref_id": null |
| }, |
| { |
| "start": 116, |
| "end": 139, |
| "text": "(Papineni et al., 2002)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 235, |
| "end": 248, |
| "text": "(Quirk, 2004)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Quality Estimation", |
| "sec_num": "2" |
| }, |
| { |
| "text": "\u2022 In order to minimise annotation costs, translation instances are often spread among annotators, such that each instance is only labelled by one or a few judges. In fact, for a sizeable dataset (thousands of instances), the annotation of a complete dataset by a single judge may become infeasible. \u2022 It is often desirable to include alternative translations of source sentences produced by multiple MT systems, which requires multiple annotators for unbiased judgements, particularly for labels such as post-editing time (a translation seen a second time will require less editing effort). \u2022 For crowd-sourced annotations it is often impossible to ensure that the same annotators will label the same subset of cases. These features -which are also typical of many other linguistic annotation tasks -make the learning process extremely challenging. Learning models from datasets annotated by multiple annotators remains an open challenge in QE, as we show in Section 4. In what follows, we present our QE datasets in more detail.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Quality Estimation", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We use two freely available QE datasets to experiment with the techniques proposed in this paper: 2 WMT12: This dataset was distributed as part of the WMT12 shared task on QE (Callison-Burch et al., 2012). It contains 1,832 instances for training, and 422 for test. The English source sentences are a subset of WMT09-12 test sets. The Spanish MT outputs were created using a standard PBSMT Moses engine. Each instance was annotated with post-editing effort scores from highest effort (score 1) to lowest effort (score 5), where each score identifies an estimated percentage of the MT output that needs to be corrected. The post-editing effort scores were produced independently by three professional translators based on a previously post-edited translation by a fourth translator. In an attempt to accommodate for systematic biases among annotators, the final effort score was computed as the weighted average between the three PE-effort scores, with more weight given to the judges with higher standard deviation from their own mean score. This resulted in scores spread more evenly in the [1, 5] range.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "WPTP12: This dataset was distributed by Koponen et al. (2012) . It contains 299 English sentences translated into Spanish using two or more of eight MT systems randomly selected from all system submissions for WMT11 (Callison-Burch et al., 2011) . These MT systems range from online and customised SMT systems to commercial rule-based systems. Translations were post-edited by humans while time was recorded. The labels are the number of seconds spent by a translator editing a sentence normalised by source sentence length. The post-editing was done by eight native speakers of Spanish, including five professional translators and three translation students. Only 20 translations were edited by all eight annotators, with the remaining translations randomly distributed amongst them. The resulting dataset contains 1, 624 instances, which were randomly split into 1, 300 for training and 300 for test. According to the analysis in (Koponen et al., 2012) , while on average certain translators were found to be faster than others, their speed in post-editing individual sentences varies considerably, i.e., certain translators are faster at certain sentences. To our knowledge, no previous work has managed to successfully model the prediction of post-editing time from datasets with multiple annotators.", |
| "cite_spans": [ |
| { |
| "start": 40, |
| "end": 61, |
| "text": "Koponen et al. (2012)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 216, |
| "end": 245, |
| "text": "(Callison-Burch et al., 2011)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 932, |
| "end": 954, |
| "text": "(Koponen et al., 2012)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Machine learning models for quality estimation typically treat the problem as regression, seeking to model the relationship between features of the text input and the human quality judgement as a continuous response variable. Popular choices include Support Vector Machines (SVMs), which have been shown to perform well for quality estimation (Callison-Burch et al., 2012) using nonlinear kernel functions such as radial basis func-tions. In this paper we consider Gaussian Processes (GP) (Rasmussen and Williams, 2006) , a probabilistic machine learning framework incorporating kernels and Bayesian non-parametrics, widely considered state-of-the-art for regression. Despite this GPs have not been used widely to date in statistical NLP. GPs are particularly suitable for modelling QE for a number of reasons: 1) they explicitly model uncertainty, which is rife in QE datasets; 2) they allow fitting of expressive kernels to data, in order to modulate the effect of features of varying usefulness; and 3) they can naturally be extended to model correlated tasks using multitask kernels. We now give a brief overview of GPs, following Rasmussen and Williams (2006) .", |
| "cite_spans": [ |
| { |
| "start": 489, |
| "end": 519, |
| "text": "(Rasmussen and Williams, 2006)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 1135, |
| "end": 1164, |
| "text": "Rasmussen and Williams (2006)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gaussian Process Regression", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In our regression task 3 the data consists of n pairs D = {(x i , y i )}, where x i \u2208 R F is a Fdimensional feature vector and y i \u2208 R is the response variable. Each instance is a translation and the feature vector encodes its linguistic features; the response variable is a numerical quality judgement: post editing time or likert score. As usual, the modelling challenge is to automatically predict the value of y based on the x for unseen test input.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gaussian Process Regression", |
| "sec_num": "3" |
| }, |
| { |
| "text": "GP regression assumes the presence of a latent function, f : R F \u2192 R, which maps from the input space of feature vectors x to a scalar. Each response value is then generated from the function evaluated at the corresponding data point,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gaussian Process Regression", |
| "sec_num": "3" |
| }, |
| { |
| "text": "y i = f (x i ) + \u03b7, where \u03b7 \u223c N (0, \u03c3 2 n )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gaussian Process Regression", |
| "sec_num": "3" |
| }, |
| { |
| "text": "is added white-noise. Formally f is drawn from a GP prior,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gaussian Process Regression", |
| "sec_num": "3" |
| }, |
| { |
| "text": "f (x) \u223c GP 0, k(x, x ) ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gaussian Process Regression", |
| "sec_num": "3" |
| }, |
| { |
| "text": "which is parameterised by a mean (here, 0) and a covariance kernel function k(x, x ). The kernel function represents the covariance (i.e., similarities in the response) between pairs of data points. Intuitively, points that are in close proximity should have high covariance compared to those that are further apart, which constrains f to be a smoothly varying function of its inputs. This intuition is embodied in the squared exponential kernel (a.k.a. radial basis function or Gaussian),", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gaussian Process Regression", |
| "sec_num": "3" |
| }, |
| { |
| "text": "k(x, x ) = \u03c3 2 f exp \u2212 1 2 (x \u2212 x ) T A \u22121 (x \u2212 x ) (1) where \u03c3 2", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gaussian Process Regression", |
| "sec_num": "3" |
| }, |
| { |
| "text": "f is a scaling factor describing the overall levels of variance, and A = diag(a) is a diagonal matrix of length scales, encoding the smoothness of functions f with respect to each feature. Nonuniform length scales allow for different degrees of smoothness of f in each dimension, such that e.g., for unimportant features f is relatively flat whereas for very important features f is jagged, such that a small change in the feature value has a large effect. When the values of a are learned automatically from data, as we do herein, this is referred to as the automatic relevance determination (ARD) kernel.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gaussian Process Regression", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Given the generative process defined above, we formulate prediction as Bayesian inference under the posterior, namely", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gaussian Process Regression", |
| "sec_num": "3" |
| }, |
| { |
| "text": "p(y * |x * , D) = f p(y * |x * , f )p(f |D)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gaussian Process Regression", |
| "sec_num": "3" |
| }, |
| { |
| "text": "where x * is a test input and y * is its response value. The posterior p(f |D) reflects our updated belief over possible functions after observing the training set D, i.e., f should pass close to the response values for each training instance (but need not fit exactly due to additive noise). This is balanced against the smoothness constraints that arise from the GP prior. The predictive posterior can be solved analytically, resulting in", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gaussian Process Regression", |
| "sec_num": "3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "y * \u223c N k T * (K + \u03c3 2 n I) \u22121 y,", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Gaussian Process Regression", |
| "sec_num": "3" |
| }, |
| { |
| "text": "k(x * , x * ) \u2212 k T * (K + \u03c3 2 n I) \u22121 k * where k * = [k(x * , x 1 ) k(x * , x 2 ) \u2022 \u2022 \u2022 k(x * , x n )] T", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gaussian Process Regression", |
| "sec_num": "3" |
| }, |
| { |
| "text": "are the kernel evaluations between the test point and the training set, and {K ij = k(x i , x j )} is the kernel (gram) matrix over the training points. Note that the posterior in Eq. 2 includes not only the expected response (the mean) but also the variance, encoding the model's uncertainty, which is important for integration into subsequent processing, e.g., as part of a larger probabilistic model. GP regression also permits an analytic formulation of the marginal likelihood, p(y|X) = f p(y|X, f )p(f ), which can be used for model training (X are the training inputs). Specifically, we can derive the gradient of the (log) marginal likelihood with respect to the model hyperparameters (i.e., a, \u03c3 n , \u03c3 s etc.) and thereby find the type II maximum likelihood estimate using gradient ascent. Note that in general the marginal likelihood is non-convex in the hyperparameter values, and consequently the solutions may only be locally optimal. Here we bootstrap the learning of complex models with many hyperparameters by initialising with the (good) solutions found for simpler models, thereby avoiding poor local optima. We refer the reader to Rasmussen and Williams (2006) for further details.", |
| "cite_spans": [ |
| { |
| "start": 1150, |
| "end": 1179, |
| "text": "Rasmussen and Williams (2006)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gaussian Process Regression", |
| "sec_num": "3" |
| }, |
| { |
| "text": "At first glance GPs resemble SVMs, which also admit kernels such as the popular squared exponential kernel in Eq. 1. The key differences are that GPs are probabilistic models and support exact Bayesian inference in the case of regression (approximate inference is required for classification (Rasmussen and Williams, 2006) ). Moreover GPs provide greater flexibility in fitting the kernel hyperparameters even for complex composite kernels. In typical usage, the kernel hyperparameters for an SVM are fit using held-out estimation, which is inefficient and often involves tying together parameters to limit the search complexity (e.g., using a single scale parameter in the squared exponential). Multiple-kernel learning (G\u00f6nen and Alpayd\u0131n, 2011) goes some way to addressing this problem within the SVM framework, however this technique is limited to reweighting linear combinations of kernels and has high computational complexity.", |
| "cite_spans": [ |
| { |
| "start": 292, |
| "end": 322, |
| "text": "(Rasmussen and Williams, 2006)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 721, |
| "end": 747, |
| "text": "(G\u00f6nen and Alpayd\u0131n, 2011)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gaussian Process Regression", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Until now we have considered a standard regression scenario, where each training point is labelled with a single output variable. In order to model multiple different annotators jointly, i.e., multitask learning, we need to extend the model to handle many tasks. Conceptually, we can consider the multi-task model drawing a latent function for each task, f m (x), where m \u2208 1, ..., M is the task identifier. This function is then used to explain the response values for all the instances for that task (subject to noise). Importantly, for multi-task learning to be of benefit, the prior over {f m } must correlate the functions over different tasks, e.g., by imposing similarity constraints between the values for f m (x) and f m (x).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-task Gaussian Process Models", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We can consider two alternative perspectives for framing the multi-task learning problem: either isotopic where we associate each input point x with a vector of outputs, y \u2208 R M , one for each of the M tasks; or heterotopic where some of the outputs are missing, i.e., tasks are not constrained to share the same data points (Alvarez et al., 2011) . Given the nature of our datasets, we opted for the heterotopic approach, which can handle both singly annotated and multiply annotated data. This can be implemented by augmenting each input point with an additional task identity feature, which is paired with a single y response, and integrated into a GP model with the standard training and inference algorithms. 4 In moving to a task-augmented data representation, we need to revise our kernel function. We use a separable multi-task kernel (Bonilla et al., 2008; Alvarez et al., 2011) of the form", |
| "cite_spans": [ |
| { |
| "start": 325, |
| "end": 347, |
| "text": "(Alvarez et al., 2011)", |
| "ref_id": null |
| }, |
| { |
| "start": 714, |
| "end": 715, |
| "text": "4", |
| "ref_id": null |
| }, |
| { |
| "start": 843, |
| "end": 865, |
| "text": "(Bonilla et al., 2008;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 866, |
| "end": 887, |
| "text": "Alvarez et al., 2011)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-task Gaussian Process Models", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "k (x, d), (x , d ) = k data (x, x )B d,d ,", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Multi-task Gaussian Process Models", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where k data (x, x ) is a standard kernel over the input points, typically a squared exponential (see Eq. 1), and B \u2208 R D\u00d7D is a positive semi-definite matrix encoding task covariances. We develop a series of increasingly complex choices for B, which we compare empirically in Section 4.2:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-task Gaussian Process Models", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Independent The simplest case is where B = I, i.e., all pairs of different tasks have zero covariance. This corresponds to independent modelling of each task, although all models share the same data kernel, so this setting is not strictly equivalent to independent training with independent per-task data kernels (with different hyperparameters). Similarly, we might choose to use a single noise variance, \u03c3 2 n , or an independent noise variance hyperparameter per task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-task Gaussian Process Models", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Pooled Another extreme is B = 1, which ignores the task identity, corresponding to pooling the multi-task data into one large set. Groot et al. (2011) present a method for applying GPs for modelling multi-annotator data using this pooling kernel with independent per-task noise terms. They show on synthetic data experiments that this approach works well at extracting the signal from noise-corrupted inputs.", |
| "cite_spans": [ |
| { |
| "start": 131, |
| "end": 150, |
| "text": "Groot et al. (2011)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-task Gaussian Process Models", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Combined A simple approach for B is a weighted combination of Independent and Pool, i.e., B = 1 + aI, where the hyperparameter a \u2265 0 controls the amount of inter-task transfer between each task and the global 'pooled' task. 5 For dissimilar tasks, a high value of a allows each task to be modelled independently, while for more similar tasks low a allows the use of a large pool of similar data. A scaled version of this kernel has been shown to correspond to mean regularisation in SVMs when combined with a linear data kernel (Evgeniou et al., 2006) . A similar multi-task kernel was proposed by Daum\u00e9 III (2007) , using a linear data kernel and a = 1, which has shown to result in excellent performance across a range of NLP problems. In contrast to these earlier approaches, we learn the hyperparameter a directly, fitting the relative amounts of inter- versus intra-task transfer to the dataset.", |
| "cite_spans": [ |
| { |
| "start": 528, |
| "end": 551, |
| "text": "(Evgeniou et al., 2006)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 598, |
| "end": 614, |
| "text": "Daum\u00e9 III (2007)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-task Gaussian Process Models", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Combined+ We consider an extension to the Combined kernel, B = 1 + diag(a), a d \u2265 0 in which each task has a different hyperparameter modulating its independence from the global pool. This additional flexibility can be used, e.g., to allow individual outlier annotators to be modelled independently of the others, by assigning a high value to a d . In contrast, Combined ties together the parameters for all tasks, i.e., all annotators are assumed to have similar quality in that they deviate from the mean to the same degree.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-task Gaussian Process Models", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The approaches above assume that the data is split into an unstructured set of M tasks, e.g., by annotator. However, it is often the case that we have additional information about each data instance in the form of metadata. In our quality estimation experiments we consider as metadata the MT system which produced the translation, and the identity of the source sentence being translated. Many other types of metadata, such as the level of experience of the annotator, could also be used. One way of integrating such metadata would be to define a separate task for every observed combination of metadata values, in which case we treat the metadata as a task descriptor. Doing so naively would however incur a significant penalty, as each task will have very few training instances resulting in inaccurate models, even with the inter-task kernel approaches defined above. We instead extend the task-level kernels to use the task descriptors directly to represent task correlations. Let B (i) be a square covariance matrix for the i th task descriptor of M , with a column and row for each value (e.g., annotator identity, translation system, etc.). We redefine the task level kernel using paired inputs (x, m), where m are the task descriptors,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Integrating metadata", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "k (x, m), (x , m ) = k data (x, x ) M i=1 B (i) m i ,m i .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Integrating metadata", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "This is equivalent to using a structured task-kernel", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Integrating metadata", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "B = B (1) \u2297 B (2) \u2297 \u2022 \u2022 \u2022 \u2297 B (M )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Integrating metadata", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "where \u2297 is the Kronecker product. Using this formulation we can consider any of the above choices for B applied to each task descriptor. In our experiments we consider the Combined and Combined+ kernels, which allow the model to learn the relative importance of each descriptor in terms of independent modelling versus pooling the data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Integrating metadata", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "4 Multi-task Quality Estimation", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Integrating metadata", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Feature sets: In all experiments we use 17 shallow QE features that have been shown to perform well in previous work. These were used by a highly competitive baseline entry in the WMT12 shared task, and were extracted here using the system provided by that shared task. 6 They include simple counts, e.g., the tokens in sentences, as well as source and target language model probabilities. Each feature was scaled to have zero mean and unit standard deviation on the training set.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Baselines: The baselines use the SVM regression algorithm with radial basis function kernel and parameters \u03b3 and C optimised through grid-search and 5-fold cross validation on the training set. This is generally a very strong baseline: in the WMT12 QE shared task, only five out of 19 submissions were able to significantly outperform it, and only by including many complex additional features, tree kernels, etc. We also present \u00b5, a trivial baseline based on predicting for each test instance the training mean (overall, and for specific tasks).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "GP: All GP models were implemented using the GPML Matlab toolbox. 7 Hyperparameter optimisation was performed using conjugate gradient ascent of the log marginal likelihood function, with up to 100 iterations. The simpler models were initialised with all hyperparameters set to one, while more complex models were initialised using the Table 1 : Single-task learning results on the WMT12 dataset, trained and evaluated against the weighted averaged response variable. \u00b5 is a baseline which predicts the training mean, SVM uses the same system as the WMT12 QE task, and the remainder are GP regression models with different kernels (all include additive noise).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 336, |
| "end": 343, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "solution for a simpler model. For instance, models using ARD kernels were initialised from an equivalent isotropic kernel (which ties all the hyperparameters together), and independent per-task noise models were initialised from a single noise model. This approach was more reliable than random restarts in terms of accuracy and runtime efficiency.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Evaluation: We evaluate predictive accuracy using two measures: mean absolute error,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "MAE = 1 N N i=1 |y i \u2212\u0177 i | and root mean square error, RMSE = 1 N N i=1 (y i \u2212\u0177 i ) 2", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": ", where y i are the gold standard response values and\u0177 i are the model predictions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Our experiments aim to demonstrate the efficacy of GP regression, both the single task and multitask settings, compared to competitive baselines.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We start by comparing GP regression with alternative approaches using the WMT12 dataset on the standard task of predicting a weighted mean quality rating (as it was done in the WMT12 QE shared task). Table 1 shows the results for baseline approaches and the GP models, using a variety of different kernels (see Rasmussen and Williams (2006) for details of the kernel functions). From this we can see that all models do much better than the mean baseline and that most of the GP models have lower error than the state-of-the-art SVM. In terms of kernels, the linear kernel performs comparatively worse than non-linear kernels. Overall the squared exponen- Table 2 : Results on the WMT12 dataset, trained and evaluated over all three annotators' judgements. Shown above are the training mean baseline \u00b5, single-task learning approaches, and multi-task learning models, with the columns showing macro average error rates over all three response values. All systems use a squared exponential ARD kernel in a product with the named task-kernel, and with added noise (per-task noise is denoted {N}, otherwise has shared noise).", |
| "cite_spans": [ |
| { |
| "start": 311, |
| "end": 340, |
| "text": "Rasmussen and Williams (2006)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 200, |
| "end": 207, |
| "text": "Table 1", |
| "ref_id": null |
| }, |
| { |
| "start": 655, |
| "end": 662, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "WMT12: Single task", |
| "sec_num": null |
| }, |
| { |
| "text": "tial ARD kernel has the best performance under both measures of error, and for this reason we use this kernel in our subsequent experiments. WMT12: Multi-task We now turn to the multitask setting, where we seek to model each of the three annotators' predictions. Table 2 presents the results. Note that here error rates are measured over all of the three annotators' judgements, and consequently are higher than those measured against their average response in Table 1 . For comparison, taking the predictions of the best model, Combined, in Table 2 and evaluating its averaged prediction has a MAE of 0.6588 vs. the averaged gold standard, significantly outperforming the best model in Table 1 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 263, |
| "end": 270, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 461, |
| "end": 468, |
| "text": "Table 1", |
| "ref_id": null |
| }, |
| { |
| "start": 542, |
| "end": 549, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 687, |
| "end": 694, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "WMT12: Single task", |
| "sec_num": null |
| }, |
| { |
| "text": "There are a number of important findings in Table 2. First, the independently trained models do well, outperforming the pooled model with fixed noise, indicating that naively pooling the data is counter-productive and that there are annotatorspecific biases. Including per-annotator noise to the pooled model provides a boost in performance, however the best results are obtained using the Combined kernel which brings the strengths of both the independent and pooled settings. There are only minor differences between the different multi-task kernels, and in this case per-annotator noise made little difference. An explanation for the contradictory findings about the importance of independent noise is that differences between annotators can already be explained by the MTL model using the multi-task kernel, and need not be explained as noise.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "WMT12: Single task", |
| "sec_num": null |
| }, |
| { |
| "text": "The GP models significantly improve over the baselines, including an SVM trained independently and using the EasyAdapt method for multi-task learning (Daum\u00e9 III, 2007) . While EasyAdapt showed an improvement over the independent SVM, it was a long way short of the GP models. A possible explanation is that in EasyAdapt the multi-task sharing parameter is set at a = 1, which may not be appropriate for the task. In contrast the Combined GP model learned a value of a = 0.01, weighting the value of pooling much more highly than independent training.", |
| "cite_spans": [ |
| { |
| "start": 150, |
| "end": 167, |
| "text": "(Daum\u00e9 III, 2007)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "WMT12: Single task", |
| "sec_num": null |
| }, |
| { |
| "text": "A remaining question is how these approaches cope with smaller datasets, where issues of data sparsity become more prevalent. To test this, we trained single-task, pooled and multi-task models on randomly sub-sampled training sets of different sizes, and plot their error rates in Figure 1 . As expected, for very small datasets pooling outperforms single task learning, however for modest sized datasets of \u2265 90 training instances pooling was inferior. For all dataset sizes multi-task learning is superior to the other approaches, making much better use of small and large training sets. The MTL model trained on 500 samples had an MAE of 0.7082 \u00b1 0.0042, close to the best results from the full dataset in Table 2 , despite using 1 9 as much data: here we use 1 3 as many training instances where each is singly (cf. triply) annotated. The same experiments run with multiplyannotated instances showed much weaker performance, presumably due to the more limited sample of input points and poorer fit of the ARD kernel hyperparameters. This finding suggests that our multi-task learning approach could be used to streamline annotation efforts by reducing the need for extensive multiple annotations.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 281, |
| "end": 289, |
| "text": "Figure 1", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 709, |
| "end": 716, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "WMT12: Single task", |
| "sec_num": null |
| }, |
| { |
| "text": "WPTP12 This dataset involves predicting the post-editing time for eight annotators, where we seek to test our model's capability to use additional metadata. We model the logarithm of the per-word post-editing time, in order to make the response variable more comparable between annotators and across sentences, and generally more Gaussian in shape. In Table 3 immediately we can see that the baseline of predicting the training mean is very difficult to beat, and the trained systems often do worse. Partitioning the data by annotator (\u00b5 A ) gives the best baseline result, while there is less information from the MT system or sentence identity. Single-task learning performs only a little better than these baselines, although some approaches such as the naive pooling perform terribly. This suggests that the tasks are highly different to one another. Interestingly, adding the per-task noise models to the pooling approach greatly improves its performance. The multi-task learning methods performed best when using the annotator identity as the task descriptor, and less well for the MT system and sentence pair, where they only slightly improved over the baseline. However, making use of all these layers of metadata together gives substantial further improvements, reaching the best result with Combined A,S,T . The effect of adding per-task noise to these models was less marked than for the pooled models, as in the WMT12 experiments. Inspecting the learned hyperparameters, the combined models learned a large bias towards independent learning over pooling, in contrast to the WMT12 experiments. This may explain the poor performance of EasyAdapt on this dataset.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 352, |
| "end": 359, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "WMT12: Single task", |
| "sec_num": null |
| }, |
| { |
| "text": "This paper presented a novel approach for learning from human linguistic annotations by explicitly training models of individual annotators (and possibly additional metadata) using multi-task learning. Our method using Gaussian Processes is flexible, allowing easy learning of inter-dependences between different annotators and other task meta- Table 3 : Results on the WPTP12 dataset, using the log of the post-editing time per word as the response variable. Shown above are the training mean and SVM baselines, single-task learning and multi-task learning results (micro average). The subscripts denote the task split: annotator (A), MT system (S) and sentence identity (T).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 345, |
| "end": 352, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "data. Our experiments showed how our approach outperformed competitive baselines on two machine translation quality regression problems, including the highly challenging problem of predicting post-editing time.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "In future work we plan to apply these techniques to new datasets, particularly noisy crowd-sourced data with much larger numbers of annotators, as well as a wider range of task types and mixtures thereof (regression, ordinal regression, ranking, classification). We also have preliminary positive results for more advanced multi-task kernels, e.g., general dense matrices, which can induce clusters of related tasks.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Our multi-task learning approach has much wider application. Models of individual annotators could be used to train machine translation systems to optimise an annotator-specific quality measure, or in active learning for corpus annotation, where the model can suggest the most appropriate instances for each annotator or the best annotator for a given instance. Further, our approach contributes to work based on cheap and fast crowdsourcing of linguistic annotation by minimising the need for careful data curation and quality control.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We are not strictly the first, Polajnar et al. (2011) used GPs for text classification.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Both datasets can be downloaded from http://www.dcs.shef.ac.uk/\u02dclucia/resources.html.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Our approach generalises to classification, ranking (ordinal regression) or various other training objectives, including mixtures of objectives. In this paper we use regression for simplicity of exposition and implementation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Note that the separable kernel (Eq. 3) gives rise to block structured kernel matrices which permit various optimisations(Bonilla et al., 2008) to reduce the computational complexity of inference, e.g., the matrix inversion in Eq. 2.5 Note that larger values of a need not affect the overall magnitude of k, which can be down-scaled by the \u03c3 2 f factor in the data kernel (Eq. 1).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "The software used to extract these (and other) features can be downloaded from http://www.quest.dcs.shef.ac.uk/ 7 http://www.gaussianprocess.org/gpml/code", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work was funded by PASCAL2 Harvest Programme, as part of the QuEst project: http: //www.quest.dcs.shef.ac.uk/. The authors would like to thank Neil Lawerence and James Hensman for advice on Gaussian Processes, the QuEst participants, particularly Jos\u00e9 Guilherme Camargo de Souza and Eva Hassler, and the three anonymous reviewers.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Kernels for vector-valued functions: A review. Foundations and Trends in Machine Learning", |
| "authors": [ |
| { |
| "first": "Lawrence", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "", |
| "volume": "4", |
| "issue": "", |
| "pages": "195--266", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lawrence. 2011. Kernels for vector-valued func- tions: A review. Foundations and Trends in Machine Learning, 4(3):195-266.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Goodness: a method for measuring machine translation confidence", |
| "authors": [ |
| { |
| "first": "Nguyen", |
| "middle": [], |
| "last": "Bach", |
| "suffix": "" |
| }, |
| { |
| "first": "Fei", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yaser", |
| "middle": [], |
| "last": "Al-Onaizan", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "211--219", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nguyen Bach, Fei Huang, and Yaser Al-Onaizan. 2011. Goodness: a method for measuring machine translation confidence. In the 49th Annual Meet- ing of the Association for Computational Linguis- tics: Human Language Technologies, pages 211- 219, Portland, Oregon.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Confidence Estimation for Machine Translation", |
| "authors": [ |
| { |
| "first": "Nicola", |
| "middle": [], |
| "last": "Sanchis", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ueffing", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "the 20th International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "315--321", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sanchis, and Nicola Ueffing. 2004. Confidence Es- timation for Machine Translation. In the 20th Inter- national Conference on Computational Linguistics (Coling 2004), pages 315-321, Geneva.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Multi-task gaussian process prediction", |
| "authors": [ |
| { |
| "first": "Edwin", |
| "middle": [], |
| "last": "Bonilla", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Kian", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Chai", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Williams", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Advances in Neural Information Processing Systems (NIPS)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Edwin Bonilla, Kian Ming Chai, and Christopher Williams. 2008. Multi-task gaussian process pre- diction. In Advances in Neural Information Process- ing Systems (NIPS).", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Findings of the 2011 workshop on statistical machine translation", |
| "authors": [ |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Callison-Burch", |
| "suffix": "" |
| }, |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Christof", |
| "middle": [], |
| "last": "Monz", |
| "suffix": "" |
| }, |
| { |
| "first": "Omar", |
| "middle": [], |
| "last": "Zaidan", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "the Sixth Workshop on Statistical Machine Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "22--64", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chris Callison-Burch, Philipp Koehn, Christof Monz, and Omar Zaidan. 2011. Findings of the 2011 work- shop on statistical machine translation. In the Sixth Workshop on Statistical Machine Translation, pages 22-64, Edinburgh, Scotland.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Findings of the 2012 workshop on statistical machine translation", |
| "authors": [ |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Callison-Burch", |
| "suffix": "" |
| }, |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Christof", |
| "middle": [], |
| "last": "Monz", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Post", |
| "suffix": "" |
| }, |
| { |
| "first": "Radu", |
| "middle": [], |
| "last": "Soricut", |
| "suffix": "" |
| }, |
| { |
| "first": "Lucia", |
| "middle": [], |
| "last": "Specia", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "the Seventh Workshop on Statistical Machine Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "10--51", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chris Callison-Burch, Philipp Koehn, Christof Monz, Matt Post, Radu Soricut, and Lucia Specia. 2012. Findings of the 2012 workshop on statistical ma- chine translation. In the Seventh Workshop on Statistical Machine Translation, pages 10-51, Montr\u00e9al, Canada.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Frustratingly easy domain adaptation", |
| "authors": [ |
| { |
| "first": "Hal", |
| "middle": [], |
| "last": "Daum\u00e9", |
| "suffix": "" |
| }, |
| { |
| "first": "Iii", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "the 45th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hal Daum\u00e9 III. 2007. Frustratingly easy domain adap- tation. In the 45th Annual Meeting of the Associ- ation for Computational Linguistics, Prague, Czech Republic.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Learning multiple tasks with kernel methods", |
| "authors": [ |
| { |
| "first": "Theodoros", |
| "middle": [], |
| "last": "Evgeniou", |
| "suffix": "" |
| }, |
| { |
| "first": "Charles", |
| "middle": [ |
| "A" |
| ], |
| "last": "Micchelli", |
| "suffix": "" |
| }, |
| { |
| "first": "Massimiliano", |
| "middle": [], |
| "last": "Pontil", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "6", |
| "issue": "1", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Theodoros Evgeniou, Charles A. Micchelli, and Massi- miliano Pontil. 2006. Learning multiple tasks with kernel methods. Journal of Machine Learning Re- search, 6(1):615.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Novel tools to streamline the conference review process: experiences from SIGKDD'09", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Peter", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Flach", |
| "suffix": "" |
| }, |
| { |
| "first": "Bruno", |
| "middle": [], |
| "last": "Spiegler", |
| "suffix": "" |
| }, |
| { |
| "first": "Simon", |
| "middle": [], |
| "last": "Gol\u00e9nia", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Price", |
| "suffix": "" |
| }, |
| { |
| "first": "Ralf", |
| "middle": [], |
| "last": "Guiver", |
| "suffix": "" |
| }, |
| { |
| "first": "Thore", |
| "middle": [], |
| "last": "Herbrich", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohammed", |
| "middle": [ |
| "J" |
| ], |
| "last": "Graepel", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Zaki", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "SIGKDD Explor. Newsl", |
| "volume": "11", |
| "issue": "2", |
| "pages": "63--67", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter A. Flach, Sebastian Spiegler, Bruno Gol\u00e9nia, Si- mon Price, John Guiver, Ralf Herbrich, Thore Grae- pel, and Mohammed J. Zaki. 2010. Novel tools to streamline the conference review process: experi- ences from SIGKDD'09. SIGKDD Explor. Newsl., 11(2):63-67, May.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Multiple kernel learning algorithms", |
| "authors": [ |
| { |
| "first": "Mehmet", |
| "middle": [], |
| "last": "G\u00f6nen", |
| "suffix": "" |
| }, |
| { |
| "first": "Ethem", |
| "middle": [], |
| "last": "Alpayd\u0131n", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "12", |
| "issue": "", |
| "pages": "2211--2268", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mehmet G\u00f6nen and Ethem Alpayd\u0131n. 2011. Multi- ple kernel learning algorithms. Journal of Machine Learning Research, 12:2211-2268.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Learning from multiple annotators with gaussian processes", |
| "authors": [ |
| { |
| "first": "Perry", |
| "middle": [], |
| "last": "Groot", |
| "suffix": "" |
| }, |
| { |
| "first": "Adriana", |
| "middle": [], |
| "last": "Birlutiu", |
| "suffix": "" |
| }, |
| { |
| "first": "Tom", |
| "middle": [], |
| "last": "Heskes", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 21st international conference on Artificial neural networks -Volume Part II, ICANN'11", |
| "volume": "", |
| "issue": "", |
| "pages": "159--164", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Perry Groot, Adriana Birlutiu, and Tom Heskes. 2011. Learning from multiple annotators with gaussian processes. In Proceedings of the 21st international conference on Artificial neural networks -Volume Part II, ICANN'11, pages 159-164, Espoo, Finland.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Bridging smt and tm with translation recommendation", |
| "authors": [ |
| { |
| "first": "Yifan", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Yanjun", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| }, |
| { |
| "first": "Josef", |
| "middle": [], |
| "last": "Van Genabith", |
| "suffix": "" |
| }, |
| { |
| "first": "Andy", |
| "middle": [], |
| "last": "Way", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "the 48th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "622--630", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yifan He, Yanjun Ma, Josef van Genabith, and Andy Way. 2010. Bridging smt and tm with transla- tion recommendation. In the 48th Annual Meet- ing of the Association for Computational Linguis- tics, pages 622-630, Uppsala, Sweden.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Quality management on amazon mechanical turk", |
| "authors": [ |
| { |
| "first": "Panagiotis", |
| "middle": [ |
| "G" |
| ], |
| "last": "Ipeirotis", |
| "suffix": "" |
| }, |
| { |
| "first": "Foster", |
| "middle": [], |
| "last": "Provost", |
| "suffix": "" |
| }, |
| { |
| "first": "Jing", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the ACM SIGKDD Workshop on Human Computation, HCOMP '10", |
| "volume": "", |
| "issue": "", |
| "pages": "64--67", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Panagiotis G. Ipeirotis, Foster Provost, and Jing Wang. 2010. Quality management on amazon mechanical turk. In Proceedings of the ACM SIGKDD Work- shop on Human Computation, HCOMP '10, pages 64-67, Washington DC.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Post-editing time as a measure of cognitive effort", |
| "authors": [ |
| { |
| "first": "Maarit", |
| "middle": [], |
| "last": "Koponen", |
| "suffix": "" |
| }, |
| { |
| "first": "Wilker", |
| "middle": [], |
| "last": "Aziz", |
| "suffix": "" |
| }, |
| { |
| "first": "Luciana", |
| "middle": [], |
| "last": "Ramos", |
| "suffix": "" |
| }, |
| { |
| "first": "Lucia", |
| "middle": [], |
| "last": "Specia", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the AMTA 2012 Workshop on Post-editing Technology and Practice", |
| "volume": "2012", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Maarit Koponen, Wilker Aziz, Luciana Ramos, and Lucia Specia. 2012. Post-editing time as a mea- sure of cognitive effort. In Proceedings of the AMTA 2012 Workshop on Post-editing Technology and Practice, WPTP 2012, San Diego, CA.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Bleu: a method for automatic evaluation of machine translation", |
| "authors": [ |
| { |
| "first": "Kishore", |
| "middle": [], |
| "last": "Papineni", |
| "suffix": "" |
| }, |
| { |
| "first": "Salim", |
| "middle": [], |
| "last": "Roukos", |
| "suffix": "" |
| }, |
| { |
| "first": "Todd", |
| "middle": [], |
| "last": "Ward", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei-Jing", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "the 40th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "311--318", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: a method for automatic evaluation of machine translation. In the 40th An- nual Meeting of the Association for Computational Linguistics, pages 311-318, Philadelphia, Pennsyl- vania.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "A productivity test of statistical machine translation post-editing in a typical localisation context", |
| "authors": [ |
| { |
| "first": "Mirko", |
| "middle": [], |
| "last": "Plitt", |
| "suffix": "" |
| }, |
| { |
| "first": "Fran\u00e7ois", |
| "middle": [], |
| "last": "Masselot", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Prague Bull. Math. Linguistics", |
| "volume": "93", |
| "issue": "", |
| "pages": "7--16", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mirko Plitt and Fran\u00e7ois Masselot. 2010. A productiv- ity test of statistical machine translation post-editing in a typical localisation context. Prague Bull. Math. Linguistics, 93:7-16.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Protein interaction detection in sentences via gaussian processes; a preliminary evaluation", |
| "authors": [ |
| { |
| "first": "Tamara", |
| "middle": [], |
| "last": "Polajnar", |
| "suffix": "" |
| }, |
| { |
| "first": "Simon", |
| "middle": [], |
| "last": "Rogers", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Girolami", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Int. J. Data Min. Bioinformatics", |
| "volume": "5", |
| "issue": "1", |
| "pages": "52--72", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tamara Polajnar, Simon Rogers, and Mark Girolami. 2011. Protein interaction detection in sentences via gaussian processes; a preliminary evaluation. Int. J. Data Min. Bioinformatics, 5(1):52-72, February.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Training a sentence-level machine translation confidence metric", |
| "authors": [ |
| { |
| "first": "Christopher", |
| "middle": [ |
| "B" |
| ], |
| "last": "Quirk", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the International Conference on Language Resources and Evaluation", |
| "volume": "4", |
| "issue": "", |
| "pages": "825--828", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christopher B. Quirk. 2004. Training a sentence-level machine translation confidence metric. In Proceed- ings of the International Conference on Language Resources and Evaluation, volume 4 of LREC 2004, pages 825-828, Lisbon, Portugal.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Gaussian processes for machine learning", |
| "authors": [ |
| { |
| "first": "Carl", |
| "middle": [ |
| "E" |
| ], |
| "last": "Rasmussen", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "K I" |
| ], |
| "last": "Williams", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "", |
| "volume": "1", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Carl E. Rasmussen and Christopher K.I. Williams. 2006. Gaussian processes for machine learning, volume 1. MIT press Cambridge, MA.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Eliminating spammers and ranking annotators for crowdsourced labeling tasks", |
| "authors": [ |
| { |
| "first": "Vikas", |
| "middle": [ |
| "C" |
| ], |
| "last": "Raykar", |
| "suffix": "" |
| }, |
| { |
| "first": "Shipeng", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "J. Mach. Learn. Res", |
| "volume": "13", |
| "issue": "", |
| "pages": "491--518", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vikas C. Raykar and Shipeng Yu. 2012. Eliminating spammers and ranking annotators for crowdsourced labeling tasks. J. Mach. Learn. Res., 13:491-518.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Learning from crowds", |
| "authors": [ |
| { |
| "first": "Vikas", |
| "middle": [ |
| "C" |
| ], |
| "last": "Raykar", |
| "suffix": "" |
| }, |
| { |
| "first": "Shipeng", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Linda", |
| "middle": [ |
| "H" |
| ], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Gerardo", |
| "middle": [ |
| "Hermosillo" |
| ], |
| "last": "Valadez", |
| "suffix": "" |
| }, |
| { |
| "first": "Charles", |
| "middle": [], |
| "last": "Florin", |
| "suffix": "" |
| }, |
| { |
| "first": "Luca", |
| "middle": [], |
| "last": "Bogoni", |
| "suffix": "" |
| }, |
| { |
| "first": "Linda", |
| "middle": [], |
| "last": "Moy", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "J. Mach. Learn. Res", |
| "volume": "99", |
| "issue": "", |
| "pages": "1297--1322", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vikas C. Raykar, Shipeng Yu, Linda H. Zhao, Ger- ardo Hermosillo Valadez, Charles Florin, Luca Bo- goni, and Linda Moy. 2010. Learning from crowds. J. Mach. Learn. Res., 99:1297-1322.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Semi-parametric analysis of multi-rater data", |
| "authors": [ |
| { |
| "first": "Simon", |
| "middle": [], |
| "last": "Rogers", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Girolami", |
| "suffix": "" |
| }, |
| { |
| "first": "Tamara", |
| "middle": [], |
| "last": "Polajnar", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Statistics and Computing", |
| "volume": "20", |
| "issue": "3", |
| "pages": "317--334", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Simon Rogers, Mark Girolami, and Tamara Polajnar. 2010. Semi-parametric analysis of multi-rater data. Statistics and Computing, 20(3):317-334.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Get another label? Improving data quality and data mining using multiple, noisy labelers", |
| "authors": [ |
| { |
| "first": "Victor", |
| "middle": [ |
| "S" |
| ], |
| "last": "Sheng", |
| "suffix": "" |
| }, |
| { |
| "first": "Foster", |
| "middle": [], |
| "last": "Provost", |
| "suffix": "" |
| }, |
| { |
| "first": "Panagiotis", |
| "middle": [ |
| "G" |
| ], |
| "last": "Ipeirotis", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of the 14th ACM SIGKDD, KDD'08", |
| "volume": "", |
| "issue": "", |
| "pages": "614--622", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Victor S. Sheng, Foster Provost, and Panagiotis G. Ipeirotis. 2008. Get another label? Improving data quality and data mining using multiple, noisy la- belers. In Proceedings of the 14th ACM SIGKDD, KDD'08, pages 614-622, Las Vegas, Nevada.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Cheap and fast-but is it good? Evaluating non-expert annotations for natural language tasks", |
| "authors": [ |
| { |
| "first": "Rion", |
| "middle": [], |
| "last": "Snow", |
| "suffix": "" |
| }, |
| { |
| "first": "Brendan", |
| "middle": [], |
| "last": "O'Connor", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "the 2008 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "254--263", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rion Snow, Brendan O'Connor, Daniel Jurafsky, and Andrew Y. Ng. 2008. Cheap and fast-but is it good? Evaluating non-expert annotations for natural language tasks. In the 2008 Conference on Empiri- cal Methods in Natural Language Processing, pages 254-263, Honolulu, Hawaii.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Trustrank: Inducing trust in automatic translations via ranking", |
| "authors": [ |
| { |
| "first": "Radu", |
| "middle": [], |
| "last": "Soricut", |
| "suffix": "" |
| }, |
| { |
| "first": "Abdessamad", |
| "middle": [], |
| "last": "Echihabi", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "612--621", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Radu Soricut and Abdessamad Echihabi. 2010. Trustrank: Inducing trust in automatic translations via ranking. In the 49th Annual Meeting of the Asso- ciation for Computational Linguistics: Human Lan- guage Technologies, pages 612-621, Uppsala, Swe- den, July.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Estimating the Sentence-Level Quality of Machine Translation Systems", |
| "authors": [ |
| { |
| "first": "Lucia", |
| "middle": [], |
| "last": "Specia", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Turchi", |
| "suffix": "" |
| }, |
| { |
| "first": "Nicola", |
| "middle": [], |
| "last": "Cancedda", |
| "suffix": "" |
| }, |
| { |
| "first": "Marc", |
| "middle": [], |
| "last": "Dymetman", |
| "suffix": "" |
| }, |
| { |
| "first": "Nello", |
| "middle": [], |
| "last": "Cristianini", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "the 13th Annual Meeting of the European Association for Machine Translation (EAMT'2009)", |
| "volume": "", |
| "issue": "", |
| "pages": "28--37", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lucia Specia, Marco Turchi, Nicola Cancedda, Marc Dymetman, and Nello Cristianini. 2009. Estimat- ing the Sentence-Level Quality of Machine Trans- lation Systems. In the 13th Annual Meeting of the European Association for Machine Translation (EAMT'2009), pages 28-37, Barcelona.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Machine translation evaluation versus quality estimation", |
| "authors": [ |
| { |
| "first": "Lucia", |
| "middle": [], |
| "last": "Specia", |
| "suffix": "" |
| }, |
| { |
| "first": "Dhwaj", |
| "middle": [], |
| "last": "Raj", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Turchi", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Machine Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "39--50", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lucia Specia, Dhwaj Raj, and Marco Turchi. 2010. Machine translation evaluation versus quality esti- mation. Machine Translation, pages 39-50.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Exploiting Objective Annotations for Measuring Translation Post-editing Effort", |
| "authors": [ |
| { |
| "first": "Lucia", |
| "middle": [], |
| "last": "Specia", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "the 15th Annual Meeting of the European Association for Machine Translation (EAMT'2011)", |
| "volume": "", |
| "issue": "", |
| "pages": "73--80", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lucia Specia. 2011. Exploiting Objective Annotations for Measuring Translation Post-editing Effort. In the 15th Annual Meeting of the European Association for Machine Translation (EAMT'2011), pages 73- 80, Leuven.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "The Multidimensional Wisdom of Crowds", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Welinder", |
| "suffix": "" |
| }, |
| { |
| "first": "Steve", |
| "middle": [], |
| "last": "Branson", |
| "suffix": "" |
| }, |
| { |
| "first": "Serge", |
| "middle": [], |
| "last": "Belongie", |
| "suffix": "" |
| }, |
| { |
| "first": "Pietro", |
| "middle": [], |
| "last": "Perona", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "23", |
| "issue": "", |
| "pages": "2424--2432", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Welinder, Steve Branson, Serge Belongie, and Pietro Perona. 2010. The Multidimensional Wis- dom of Crowds. In Advances in Neural Information Processing Systems, volume 23, pages 2424-2432.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Whose vote should count more: Optimal integration of labels from labelers of unknown expertise", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Whitehill", |
| "suffix": "" |
| }, |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Ruvolo", |
| "suffix": "" |
| }, |
| { |
| "first": "Ting-Fan", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Bergsma", |
| "suffix": "" |
| }, |
| { |
| "first": "Javier", |
| "middle": [], |
| "last": "Movellan", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "22", |
| "issue": "", |
| "pages": "2035--2043", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Whitehill, Paul Ruvolo, Ting-fan Wu, Jacob Bergsma, and Javier Movellan. 2009. Whose vote should count more: Optimal integration of labels from labelers of unknown expertise. Advances in Neural Information Processing Systems, 22:2035- 2043.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF1": { |
| "text": "Learning curve comparing MAE for different training methods on the WMT12 dataset, all using a squared exponential ARD data kernel and tied noise parameter. The MTL model uses the Combined task kernel. Each point is the average of 5 runs, and the error bars show \u00b11 s.d.", |
| "uris": null, |
| "type_str": "figure", |
| "num": null |
| } |
| } |
| } |
| } |