| { |
| "paper_id": "Q18-1026", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:10:38.594484Z" |
| }, |
| "title": "Finding Convincing Arguments Using Scalable Bayesian Preference Learning", |
| "authors": [ |
| { |
| "first": "Edwin", |
| "middle": [], |
| "last": "Simpson", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Ubiquitous Knowledge Processing Lab (UKP)", |
| "institution": "Technische Universit\u00e4t Darmstadt", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Iryna", |
| "middle": [], |
| "last": "Gurevych", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Ubiquitous Knowledge Processing Lab (UKP)", |
| "institution": "Technische Universit\u00e4t Darmstadt", |
| "location": {} |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "We introduce a scalable Bayesian preference learning method for identifying convincing arguments in the absence of gold-standard ratings or rankings. In contrast to previous work, we avoid the need for separate methods to perform quality control on training data, predict rankings and perform pairwise classification. Bayesian approaches are an effective solution when faced with sparse or noisy training data, but have not previously been used to identify convincing arguments. One issue is scalability, which we address by developing a stochastic variational inference method for Gaussian process (GP) preference learning. We show how our method can be applied to predict argument convincingness from crowdsourced data, outperforming the previous state-of-the-art, particularly when trained with small amounts of unreliable data. We demonstrate how the Bayesian approach enables more effective active learning, thereby reducing the amount of data required to identify convincing arguments for new users and domains. While word embeddings are principally used with neural networks, our results show that word embeddings in combination with linguistic features also benefit GPs when predicting argument convincingness.", |
| "pdf_parse": { |
| "paper_id": "Q18-1026", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "We introduce a scalable Bayesian preference learning method for identifying convincing arguments in the absence of gold-standard ratings or rankings. In contrast to previous work, we avoid the need for separate methods to perform quality control on training data, predict rankings and perform pairwise classification. Bayesian approaches are an effective solution when faced with sparse or noisy training data, but have not previously been used to identify convincing arguments. One issue is scalability, which we address by developing a stochastic variational inference method for Gaussian process (GP) preference learning. We show how our method can be applied to predict argument convincingness from crowdsourced data, outperforming the previous state-of-the-art, particularly when trained with small amounts of unreliable data. We demonstrate how the Bayesian approach enables more effective active learning, thereby reducing the amount of data required to identify convincing arguments for new users and domains. While word embeddings are principally used with neural networks, our results show that word embeddings in combination with linguistic features also benefit GPs when predicting argument convincingness.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Arguments are intended to persuade the audience of a particular point of view and are an important way for humans to reason about controversial topics (Mercier and Sperber, 2011) . The amount of argumentative text on any chosen subject can, how-Topic: \"William Farquhar ought to be honoured as the rightful founder of Singapore\". Stance: \"No, it is Raffles!\" Argument 1: HE HAS A BOSS(RAFFLES) HE HAS TO FOLLOW HIM AND NOT GO ABOUT DOING ANYTHING ELSE... Argument 2: Raffles conceived a town plan to remodel Singapore into a modern city. The plan consisted of separate areas for different... Crowdsourced labels: {2 1, 1 2, 2 1} ever, overwhelm a reader. Consider the scale of historical text archives or social media platforms with millions of users. Automated methods could help readers overcome this challenge by identifying highquality, persuasive arguments from both sides of a debate.", |
| "cite_spans": [ |
| { |
| "start": 151, |
| "end": 178, |
| "text": "(Mercier and Sperber, 2011)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Theoretical approaches for assessing argument quality have proved difficult to apply to everyday arguments (Boudry et al., 2015) . Empirical machine learning approaches instead train models using example judgments of arguments, such as those shown in Figure 1 . Previous approaches to obtaining such judgments include training annotators to assign scores from 1-6 (Persing and Ng, 2017) , asking annotators for simple binary or three-class categories (Wei et al., 2016b) , and aggregating binary votes from multiple people (Wei et al., 2016a; Tan et al., 2016) . However, these approaches are limited by the cost of training annotators, a highly restricted set of categories, or the need for multiple annotators per document.", |
| "cite_spans": [ |
| { |
| "start": 107, |
| "end": 128, |
| "text": "(Boudry et al., 2015)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 364, |
| "end": 386, |
| "text": "(Persing and Ng, 2017)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 451, |
| "end": 470, |
| "text": "(Wei et al., 2016b)", |
| "ref_id": "BIBREF47" |
| }, |
| { |
| "start": 523, |
| "end": 542, |
| "text": "(Wei et al., 2016a;", |
| "ref_id": "BIBREF46" |
| }, |
| { |
| "start": 543, |
| "end": 560, |
| "text": "Tan et al., 2016)", |
| "ref_id": "BIBREF41" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 251, |
| "end": 259, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "An alternative way to judge arguments is to compare them against one another (Habernal and Gurevych, 2016) . When comparing the arguments in Figure 1 , we may judge that argument 1 is less convincing due to its writing style, whereas argument 2 presents evidence in the form of historical events. Pairwise comparisons such as this are known to place less cognitive burden on human annotators than choosing a numerical rating and allow fine-grained sorting of items that is not possible with categorical labels (Kendall, 1948; Kingsley and Brown, 2010) . Unlike numerical ratings, pairwise comparisons are not affected by different annotators' biases toward high, low or middling values, or an individual's bias changing over time.", |
| "cite_spans": [ |
| { |
| "start": 77, |
| "end": 106, |
| "text": "(Habernal and Gurevych, 2016)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 510, |
| "end": 525, |
| "text": "(Kendall, 1948;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 526, |
| "end": 551, |
| "text": "Kingsley and Brown, 2010)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 141, |
| "end": 149, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In practice, we face a data acquisition bottleneck when encountering new domains or audiences. For example, neural network methods typically require datasets with many thousands of hand-labeled examples to perform well (Srivastava et al., 2014; Collobert et al., 2011) . One solution is to employ multiple non-specialist annotators at low cost (crowdsourcing), but this requires quality control techniques to account for errors. Another source of data are the actions of users of a software application, which can be interpreted as pairwise judgments (Joachims, 2002) . For example, when a user clicks on an argument in a list it can be interpreted as a preference for the selected argument over more highly-ranked arguments. However, the resulting pairwise labels are likely to be a very noisy indication of preference.", |
| "cite_spans": [ |
| { |
| "start": 219, |
| "end": 244, |
| "text": "(Srivastava et al., 2014;", |
| "ref_id": "BIBREF39" |
| }, |
| { |
| "start": 245, |
| "end": 268, |
| "text": "Collobert et al., 2011)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 551, |
| "end": 567, |
| "text": "(Joachims, 2002)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we develop a Bayesian approach to learn from noisy pairwise preferences based on Gaussian process preference learning (GPPL) (Chu and Ghahramani, 2005) . We model argument convincingness as a function of textual features, including word embeddings, and develop an inference method for GPPL that scales to realistic dataset sizes using stochastic variational inference (SVI) (Hoffman et al., 2013) . Using datasets provided by Habernal and Gurevych (2016), we show that our method outperforms the previous state-of-the-art for ranking arguments by convincingness and identifying the most convincing argument in a pair. Further experiments show that our approach is particularly advantageous with small, noisy datasets, and in an active learning set-up. Our software is publicly available 1 .", |
| "cite_spans": [ |
| { |
| "start": 140, |
| "end": 166, |
| "text": "(Chu and Ghahramani, 2005)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 389, |
| "end": 411, |
| "text": "(Hoffman et al., 2013)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The rest of the paper is structured as follows. Section 2 reviews related work on argumentation, then Section 3 motivates the use of Bayesian methods by discussing their successful applications in NLP. In Section 4, we review preference learning methods and then Section 5 describes our scalable Gaussian process-based approach. Section 6 presents our evaluation, comparing our method to the state-of-the art and testing with noisy data and active learning. Finally, we present conclusions and future work. Lukin et al. (2017) demonstrated that an audience's personality and prior stance affect an argument's persuasiveness, but they were unable to predict belief change to a high degree of accuracy. Related work has shown how persuasiveness is also affected by the sequence of arguments in a discussion (Tan et al., 2016; Rosenfeld and Kraus, 2016; Monteserin and Amandi, 2013) , but this work focuses on predicting salience of an argument given the state of the debate, rather than the qualities of arguments. Wachsmuth et al. (2017) recently showed that relative comparisons of argument convincingness correlate with theory-derived quality ratings. Habernal and Gurevych (2016) established datasets containing crowdsourced pairwise judgments of convincingness for arguments taken from online discussions. Errors in the crowdsourced data were handled by determining gold labels using the MACE algorithm (Hovy et al., 2013) . The gold labels were then used to train SVM and bi-directional long short-term memory (BiLSTM) classifiers to predict pairwise labels for new arguments. The gold labels were also used to construct a directed graph of convincingness, which was input to PageRank to produce scores for each argument. These scores were then used to train SVM and BiLSTM regression models. 
A drawback of such pipeline approaches is that they are prone to error propagation (Chen and Ng, 2016) , and consensus algorithms, such as MACE, require multiple crowdsourced labels for each argument pair, which increases annotation costs.", |
| "cite_spans": [ |
| { |
| "start": 507, |
| "end": 526, |
| "text": "Lukin et al. (2017)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 805, |
| "end": 823, |
| "text": "(Tan et al., 2016;", |
| "ref_id": "BIBREF41" |
| }, |
| { |
| "start": 824, |
| "end": 850, |
| "text": "Rosenfeld and Kraus, 2016;", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 851, |
| "end": 879, |
| "text": "Monteserin and Amandi, 2013)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 1013, |
| "end": 1036, |
| "text": "Wachsmuth et al. (2017)", |
| "ref_id": "BIBREF45" |
| }, |
| { |
| "start": 1406, |
| "end": 1425, |
| "text": "(Hovy et al., 2013)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 1880, |
| "end": 1899, |
| "text": "(Chen and Ng, 2016)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "When faced with a lack of reliable annotated data, Bayesian approaches have a number of advantages. Bayesian inference provides a mathematical framework for combining multiple observations with prior information. Given a model, M , and observed data, D, we apply Bayes' rule to obtain a posterior distribution over M , which can be used to make predictions about unseen data or latent variables:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Bayesian Methods for NLP", |
| "sec_num": "3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P (M |D) = P (D|M )P (M ) P (D) ,", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Bayesian Methods for NLP", |
| "sec_num": "3" |
| }, |
| { |
| "text": "where P (D|M ) is the likelihood of the data given M , and P (M ) is the model prior. If the dataset is small, the posterior remains close to the prior, so the model does not assume extreme values given a small training sample. Rather than learning a posterior, neural network training typically selects model parameters that maximize the likelihood, so they are more prone to overfitting with small datasets, which can reduce performance (Xiong et al., 2011) . Bayesian methods can be trained using unsupervised or semi-supervised learning to take advantage of structure in unlabeled data when labeled data is in short supply. Popular examples in NLP are Latent Dirichlet Allocation (LDA) (Blei et al., 2003) , which is used for topic modelling, and its extension, the hierarchical Dirichlet process (HDP) (Teh et al., 2005) , which learns the number of topics rather than requiring it to be fixed a priori. Semisupervised Bayesian learning has also been used to achieve state-of-the-art results for semantic role labelling (Titov and Klementiev, 2012) .", |
| "cite_spans": [ |
| { |
| "start": 439, |
| "end": 459, |
| "text": "(Xiong et al., 2011)", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 690, |
| "end": 709, |
| "text": "(Blei et al., 2003)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 807, |
| "end": 825, |
| "text": "(Teh et al., 2005)", |
| "ref_id": "BIBREF42" |
| }, |
| { |
| "start": 1025, |
| "end": 1053, |
| "text": "(Titov and Klementiev, 2012)", |
| "ref_id": "BIBREF44" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Bayesian Methods for NLP", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We can combine independent pieces of weak evidence using Bayesian methods through the likelihood. For instance, a Bayesian network can be used to infer attack relations between arguments by combining votes for acceptable arguments from different people (Kido and Okamoto, 2017) . Other Bayesian approaches combine crowdsourced annotations to train a sentiment classifier without a separate quality control step (Simpson et al., 2015; Felt et al., 2016) .", |
| "cite_spans": [ |
| { |
| "start": 253, |
| "end": 277, |
| "text": "(Kido and Okamoto, 2017)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 411, |
| "end": 433, |
| "text": "(Simpson et al., 2015;", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 434, |
| "end": 452, |
| "text": "Felt et al., 2016)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Bayesian Methods for NLP", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Several successful Bayesian approaches in NLP make use of Gaussian processes (GPs), which are distributions over functions of input features. GPs are nonparametric, meaning they can model highly nonlinear functions by allowing function complexity to grow with the amount of data (Rasmussen and Williams, 2006) . They account for model uncertainty when extrapolating from sparse training data and can be incorporated into larger graphical models. Example applications include analyzing the relationship between a user's impact on Twitter and the textual features of their tweets (Lampos et al., 2014) , predicting the level of emotion in text (Beck et al., 2014) , and estimating the quality of machine translations given source and translated texts (Cohn and Specia, 2013) .", |
| "cite_spans": [ |
| { |
| "start": 279, |
| "end": 309, |
| "text": "(Rasmussen and Williams, 2006)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 578, |
| "end": 599, |
| "text": "(Lampos et al., 2014)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 642, |
| "end": 661, |
| "text": "(Beck et al., 2014)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 749, |
| "end": 772, |
| "text": "(Cohn and Specia, 2013)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Bayesian Methods for NLP", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Our aim is to develop a Bayesian method for identifying convincing arguments given their features, which can be trained on noisy pairwise labels. Each label, i j, states that an argument, i, is more convincing than argument, j. This learning task is a form of preference learning, which can be addressed in several ways. A simple approach is to use a generic classifier by obtaining a single feature vector for each pair in the training and test datasets, either by concatenating the feature vectors of the items in the pair, or by computing the difference of the two feature vectors, as in SVM-Rank (Joachims, 2002) . However, this approach does not produce ranked lists of convincing arguments without predicting a large number of pairwise labels, nor give scores of convincingness.", |
| "cite_spans": [ |
| { |
| "start": 600, |
| "end": 616, |
| "text": "(Joachims, 2002)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Preference Learning", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Alternatively, we could learn an ordering over arguments directly using Mallows models (Mallows, 1957) , which define distributions over permutations. Mallows models can be trained from pairwise preferences (Lu and Boutilier, 2011) , but inference is usually costly since the number of possible permutations is O(N !), where N is the number of arguments. Modeling only the ordering does not allow us to quantify the difference between arguments at similar ranks.", |
| "cite_spans": [ |
| { |
| "start": 87, |
| "end": 102, |
| "text": "(Mallows, 1957)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 207, |
| "end": 231, |
| "text": "(Lu and Boutilier, 2011)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Preference Learning", |
| "sec_num": "4" |
| }, |
| { |
| "text": "To avoid the problems of classifier-based and permutation-based methods, we propose to learn a real-valued convincingness function, f , that takes argument features as input and can be used to predict rankings, pairwise labels, or ratings for individual arguments. There are two well established approaches for mapping pairwise labels to real-valued scores: the Bradley-Terry-Plackett -Luce model (Bradley and Terry, 1952; Luce, 1959; Plack-ett, 1975) and the Thurstone -Mosteller model (Thurstone, 1927; Mosteller, 2006) . Based on the latter approach, Chu and Ghahramani (2005) introduced Gaussian process preference learning (GPPL), a Bayesian model that can tolerate errors in pairwise training labels and gains the advantages of a GP for learning nonlinear functions from sparse datasets. However, the inference method proposed by Chu and Ghahramani (2005) has memory and computational costs that scale with O(N 3 ), making it unsuitable for real-world text datasets. The next section explains how we use recent developments in inference methods to develop scalable Bayesian preference learning for argument convincingness.", |
| "cite_spans": [ |
| { |
| "start": 385, |
| "end": 422, |
| "text": "-Luce model (Bradley and Terry, 1952;", |
| "ref_id": null |
| }, |
| { |
| "start": 423, |
| "end": 434, |
| "text": "Luce, 1959;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 435, |
| "end": 451, |
| "text": "Plack-ett, 1975)", |
| "ref_id": null |
| }, |
| { |
| "start": 470, |
| "end": 504, |
| "text": "-Mosteller model (Thurstone, 1927;", |
| "ref_id": null |
| }, |
| { |
| "start": 505, |
| "end": 521, |
| "text": "Mosteller, 2006)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 554, |
| "end": 579, |
| "text": "Chu and Ghahramani (2005)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 836, |
| "end": 861, |
| "text": "Chu and Ghahramani (2005)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Preference Learning", |
| "sec_num": "4" |
| }, |
| { |
| "text": "First, we introduce a probabilistic model for preference learning (Chu and Ghahramani, 2005) . We observe preference pairs, each consisting of a pair of feature vectors x i and x j , for arguments i and j, and a label y \u2208 {i j, j i}. We assume that the likelihood of y depends on the latent convincingness, f (x i ) and f (x j ), of the arguments in the pair. Our goal is to predict y for pairs that have not been observed, and predict f (x i ), which may be used to rank arguments. The relationship between convincingness and pairwise labels is described by the following:", |
| "cite_spans": [ |
| { |
| "start": 66, |
| "end": 92, |
| "text": "(Chu and Ghahramani, 2005)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scalable Bayesian Preference Learning", |
| "sec_num": "5" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "p(i j|f (x i ), f (x j ), \u03b4 i , \u03b4 j ) = 1 if f (x i ) + \u03b4 i \u2265 f (j) + \u03b4 j 0 otherwise,", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Scalable Bayesian Preference Learning", |
| "sec_num": "5" |
| }, |
| { |
| "text": "where \u03b4 \u223c N (0, 1) is Gaussian-distributed noise. If the convincingness f (x i ) is higher than the convincingness f (x j ), the preference label i j is more likely to be true. However, the label also depends on the noise terms, \u03b4 i and \u03b4 j , to allow for errors caused by, for example, disagreement between human annotators. We simplify Equation 2 by integrating out \u03b4 i and \u03b4 j to obtain the preference likelihood:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scalable Bayesian Preference Learning", |
| "sec_num": "5" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "p(i j|f (x i ), f (x j )) = p(i j|f (x i ), f (x j ), \u03b4 i , \u03b4 j ) N (\u03b4 i ; 0, 1)N (\u03b4 j ; 0, 1)d\u03b4 i d\u03b4 j = \u03a6 (z) ,", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Scalable Bayesian Preference Learning", |
| "sec_num": "5" |
| }, |
| { |
| "text": "where", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scalable Bayesian Preference Learning", |
| "sec_num": "5" |
| }, |
| { |
| "text": "z = (f (x i ) \u2212 f (x j ))/ \u221a 2,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scalable Bayesian Preference Learning", |
| "sec_num": "5" |
| }, |
| { |
| "text": "and \u03a6 is the cumulative distribution function of the standard normal distribution.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scalable Bayesian Preference Learning", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We assume that convincingness is a function, f , of argument features, drawn from a Gaussian process prior: f \u223c GP(0, k \u03b8 s), where k \u03b8 is a kernel function with hyper-parameters \u03b8, and s is a scale parameter. The kernel function controls the smoothness of f over the feature space, while s controls the variance of f . Increasing s means that, on average, the magnitude of f (x i ) \u2212 f (x j ) increases so that \u03a6(z) is closer to 0 or 1, and erroneous pairwise labels are less likely. Therefore, larger values of s correspond to less observation noise. We assume a Gamma distribution 1/s \u223c G(a 0 , b 0 ) with shape a 0 and scale b 0 , as this is a conjugate prior.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scalable Bayesian Preference Learning", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Given N arguments and P labeled preference pairs, y = {y 1 , ..., y P }, we can make predictions by finding the posterior distribution over the convinc-", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scalable Bayesian Preference Learning", |
| "sec_num": "5" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "ingness values, f = {f (x 1 ), ..., f (x N )}, given by: p (f |y, k \u03b8 , a 0 , b 0 ) \u221d p(y|f )p(f |k \u03b8 , a 0 , b 0 ) = 1 Z P k=1 \u03a6(z k ) N (f ; 0, K \u03b8 s)G(s; a 0 , b 0 )ds,", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "Scalable Bayesian Preference Learning", |
| "sec_num": "5" |
| }, |
| { |
| "text": "where Z = p (y|k \u03b8 , a 0 , b 0 ). Unfortunately, neither Z nor the integral over s can be computed analytically, so we must turn to approximations. Chu and Ghahramani (2005) used a Laplace approximation for GPPL, which finds a maximum aposteriori (MAP) solution that has been shown to perform poorly in many cases (Nickisch and Rasmussen, 2008) . More accurate estimates of the posterior could be obtained using Markov chain Monte Carlo sampling (MCMC), but this is very computationally expensive (Nickisch and Rasmussen, 2008) . Instead, we use a faster variational method that maintains the benefits of the Bayesian approach (Reece et al., 2011; Steinberg and Bonilla, 2014) and adapt this method to the preference likelihood given by Equation 3.", |
| "cite_spans": [ |
| { |
| "start": 148, |
| "end": 173, |
| "text": "Chu and Ghahramani (2005)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 314, |
| "end": 344, |
| "text": "(Nickisch and Rasmussen, 2008)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 497, |
| "end": 527, |
| "text": "(Nickisch and Rasmussen, 2008)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 627, |
| "end": 647, |
| "text": "(Reece et al., 2011;", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 648, |
| "end": 676, |
| "text": "Steinberg and Bonilla, 2014)", |
| "ref_id": "BIBREF40" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scalable Bayesian Preference Learning", |
| "sec_num": "5" |
| }, |
| { |
| "text": "To apply the variational approach, we define an approximation q(f ) to Equation 4. First, we approximate the preference likelihood with a Gaussian, P k=1 \u03a6 (z k ) \u2248 N (y; Gf , Q). This allows us to avoid the intractable integral in Z and obtain another Gaussian, q(f ) = N (f ;f , C). The parametersf and C depend on the approximate preference likelihood and an approximate distribution over s: q(s) = G(s; a, b). The variational inference algorithm begins by initializing the parameters G,f , C, a and b at random. Then, the algorithm proceeds iteratively updating each parameter in turn, given the current values for the other parameters. This optimization procedure minimizes the Kullback-Leibler (KL) divergence of p(f |y, k \u03b8 , a 0 , b 0 ) from q(f ), causing q(f ) to converge to an approximate posterior.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scalable Bayesian Preference Learning", |
| "sec_num": "5" |
| }, |
| { |
| "text": "The update equations for the meanf and covariance C require inverting the covariance matrix, K \u03b8 , at a computational cost of O(N 3 ), which is impractical with more than a few hundred data points. Furthermore, the updates also require O(N P ) computations and have O(N 2 + N P + P 2 ) memory complexity. To resolve this, we apply a recently introduced technique, stochastic variational inference (SVI) (Hoffman et al., 2013; Hensman et al., 2015) , to scale to datasets containing at least tens of thousands of arguments and pairwise labels.", |
| "cite_spans": [ |
| { |
| "start": 403, |
| "end": 425, |
| "text": "(Hoffman et al., 2013;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 426, |
| "end": 447, |
| "text": "Hensman et al., 2015)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scalable Bayesian Preference Learning", |
| "sec_num": "5" |
| }, |
| { |
| "text": "SVI makes two approximations: it assumes M inducing points, which act as a substitute for the observed arguments; it uses only a random subset of the data containing P n pairs at each iteration. At each iteration, t, rather than updatef and C directly, we update the meanf m and covariance C m for the inducing points. The update for each parameter \u03bb \u2208 {f m , C m } takes the form of a weighted mean of the previous estimate and a new estimate computed from only a subset of observations:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scalable Bayesian Preference Learning", |
| "sec_num": "5" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u03bb (t) = (1 \u2212 \u03c1 t )\u03bb (t\u22121) + \u03c1 t\u03bbt P/P n ,", |
| "eq_num": "(5)" |
| } |
| ], |
| "section": "Scalable Bayesian Preference Learning", |
| "sec_num": "5" |
| }, |
| { |
| "text": "where \u03c1 = t \u2212u is the step size, u is a forgetting rate, and\u03bb t is the new estimate computed from P n out of P observations. The values off and C can be estimated from the inducing point distribution. By choosing M < < N and P n < < P , we limit the computational complexity of each SVI iteration to O(M 3 +M P n ) and the memory complexity O(M 2 + M P n + P 2 n ). To choose representative inducing points, we use K-means++ (Arthur and Vassilvitskii, 2007) with K = M to rapidly cluster the feature vectors, then take the cluster centers as inducing points. Compared to standard K-means, K-means++ introduces a new method for choosing the initial cluster seeds that reduces the number of poorquality clusterings.", |
| "cite_spans": [ |
| { |
| "start": 425, |
| "end": 457, |
| "text": "(Arthur and Vassilvitskii, 2007)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scalable Bayesian Preference Learning", |
| "sec_num": "5" |
| }, |
| { |
| "text": "A further benefit of GPs is that they enable automatic relevance determination (ARD) to identify informative features, which works as follows. The prior covariance of f is defined by a kernel function of the form", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scalable Bayesian Preference Learning", |
| "sec_num": "5" |
| }, |
| { |
| "text": "k \u03b8 (x, x ) = \u220f D d=1 k d (|x d \u2212 x d |/l d ),",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scalable Bayesian Preference Learning", |
| "sec_num": "5" |
| }, |
| { |
| "text": "where k d is a function of the distance between the values of feature d for items x and x , and a lengthscale hyper-parameter, l d . The length-scale controls the smoothness of the function across the feature space, and can be optimized by choosing the value of l d that maximizes the approximate log marginal likelihood, L \u2248 log p(y). This process is known as maximum likelihood II (MLII) (Rasmussen and Williams, 2006) . Features with larger length-scales after optimization are less relevant because their values have less effect on k \u03b8 (x, x ). To avoid the cost of optimizing the length-scales, we can alternatively set them using a median heuristic, which has been shown to perform well in practice (Gretton et al., 2012):", |
| "cite_spans": [ |
| { |
| "start": 390, |
| "end": 420, |
| "text": "(Rasmussen and Williams, 2006)", |
| "ref_id": "BIBREF34" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scalable Bayesian Preference Learning", |
| "sec_num": "5" |
| }, |
| { |
| "text": "l d = 1/D median ({|x i,d \u2212 x j,d |, \u2200i = 1, ..., N, \u2200j = 1, ..., N }).",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scalable Bayesian Preference Learning", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We first use toy datasets to illustrate the behavior of several different methods (described below) . Then, we analyze the scalability and performance of our approach on datasets provided by Habernal and Gurevych (2016) , which contain pairwise labels for arguments taken from online discussion forums. The labels can have a value of 0, meaning the annotator found the second argument in the pair more convincing, 1 if the annotator was undecided, or 2 if the first argument was more convincing. To test different scenarios, different pre-processing steps were used to produce the three UKPConvArg* datasets shown in Table 1 . UKPConvArgStrict and UKPCon-vArgRank were cleaned to remove disagreements between annotators, hence can be considered to be noise-free. UKPConvArgCrowdSample is used to evaluate performance with noisy crowdsourced data including conflicts and undecided labels, and to test the suitability of our method for active learning to address the cold-start problem in domains with no labeled data. For these datasets, we perform 32-fold cross validation, where each fold corresponds to one of 16 controversial topics, and one of two stances for that topic.", |
| "cite_spans": [ |
| { |
| "start": 191, |
| "end": 219, |
| "text": "Habernal and Gurevych (2016)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 82, |
| "end": 99, |
| "text": "(described below)", |
| "ref_id": null |
| }, |
| { |
| "start": 617, |
| "end": 624, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "Our two tasks are ranking arguments by convincingness and classification of pairwise labels to predict which argument is more convincing. For both tasks, our proposed GPPL method is trained using the pairwise labels for the training folds. We rank arguments by their expected convincingness,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method Comparison", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "E[f (x i )] \u2248 f (x i )",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method Comparison", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "for each argument i with feature vector x i , under the approximate posterior q(f ) output by our SVI algorithm. We obtain classification probabilities using Equation 3 but accommodate the posterior covariance, C, of f , by replacing z with",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method Comparison", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "z = (f (x i ) \u2212 f (x j )) / \u221a(2 + C ii + C jj \u2212 C ij \u2212 C ji).",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method Comparison", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "We tested the sensitivity of GPPL to the choice of seed values for K-means++ by training the model on the same 31 folds of UKPConvArgStrict 20 times, each with a different random seed, then testing on the remaining fold. The resulting accuracy had a standard deviation of 0.03. In the following experiments, all methods were initialized and trained once for each fold of each experiment. We compare GPPL to an SVM with radial basis function kernel, and a bi-directional long shortterm memory network (BiLSTM), with 64 output nodes in the core LSTM layer. The SVM and BiL-STM were tested by Habernal and Gurevych (2016) and are available in our software repository. To apply SVM and BiLSTM to the classification task, we concatenate the feature vectors of each pair of arguments and train on the pairwise labels. For ranking, PageRank is first applied to arguments in the train-ing folds to obtain scores from the pairwise labels, which are then used to train the SVM and BiLSTM regression models.", |
| "cite_spans": [ |
| { |
| "start": 590, |
| "end": 618, |
| "text": "Habernal and Gurevych (2016)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method Comparison", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "As a Bayesian alternative to GPPL, we test a Gaussian process classifier (GPC) for the classification task by concatenating the feature vectors of arguments in the same way as the SVM classifier. We also evaluate a non-Bayesian approach that infers function values using the same pairwise preference likelihood (PL) as GPPL (Equation 3), but uses them to train an SVM regression model instead of a GP. We refer to this method as PL+SVR.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method Comparison", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "We use two sets of input features. The ling feature set contains 32, 010 linguistic features, including unigrams, bigrams, parts-of-speech (POS) ngrams, production rules, ratios and counts of word length, punctuation and verb forms, dependency tree depth, named entity type counts, readability measures, sentiment scores, and spell-checking. The GloVe features are word embeddings with 300 dimensions. Both feature sets were developed by Habernal and Gurevych (2016) . We also evaluate a combination of both feature sets, ling + GloVe. To create a single embedding vector per argument as input for GPPL, we take the mean of individual word embeddings for tokens in the argument. We also tested skip-thoughts (Kiros et al., 2015) and Siamese-CBOW (Kenter et al., 2016) with GPPL on UKPConvArgStrict and UKPConvArgRank, both with MLII optimization and the median heuristic, both alone and combined with ling. However, we found that mean GloVe embeddings produced substantially better performance in all tests. To input the argument-level ling features to BiLSTM, we extend the network by adding a dense layer with 64 nodes.", |
| "cite_spans": [ |
| { |
| "start": 438, |
| "end": 466, |
| "text": "Habernal and Gurevych (2016)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 708, |
| "end": 728, |
| "text": "(Kiros et al., 2015)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 746, |
| "end": 767, |
| "text": "(Kenter et al., 2016)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method Comparison", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "We set the GPPL hyper-parameters a 0 = 2 and b 0 = 200 by comparing training set performance on UKPConvArgStrict and UKPConvArgRank against a 0 = 2, b 0 = 20000 and a 0 = 2, b 0 = 2. The chosen prior is very weakly informative, favoring a moderate level of noise in the pairwise labels. For the kernel function, k d , we used the Mat\u00e9rn 3 2 function as it has been shown to outperform other commonlyused kernels, such as RBF, across a wide range of tasks (Rasmussen and Williams, 2006) . We defer evaluating other kernel functions to future work. To set length-scales, l d , we compare the median heuristic (labeled \"medi.\") with MLII optimization using an L-BFGS optimizer (\"opt.\"). Experiment 2 shows how the number of inducing points, M , can be set to trade off speed and accuracy. Following those results, we set M = 500 for Experiments 3, 4 and 5 and M = N for the toy dataset in Experiment 1.", |
| "cite_spans": [ |
| { |
| "start": 455, |
| "end": 485, |
| "text": "(Rasmussen and Williams, 2006)", |
| "ref_id": "BIBREF34" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method Comparison", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "To illustrate some key differences between GPPL, SVM and PageRank, we simulate four scenarios, each of which contains arguments labeled arg0 to arg4. In each scenario, we generate a set of pairwise preference labels according to the graphs shown in Figure 2 . Each scenario is repeated 25 times: in each repeat, we select arguments at random from one fold of UKPConvArgStrict then associate the mean GloVe embeddings for these arguments with the labels arg0 to arg4. We train GPPL, PageRank and the SVM classifier on the preference pairs shown in each graph and predict ranks and pairwise labels for arguments arg0 to arg4. In the \"no cycle\" scenario, arg0 is preferred to both arg1 and arg2, which is reflected in the scores predicted by PageRank and GPPL in Figure 3 . However, arg3 and arg4 are not connected to the rest of the graph, and PageRank and GPPL score them differently. Figure 4 shows how GPPL provides less confident classifications for pairs that were not yet observed, e.g. arg2 arg4, in contrast with the dis- The next scenario shows a \"single cycle\" in the preference graph. Both PageRank and GPPL produce equal values for the arguments in the cycle (arg0, arg1, arg2). PageRank assigns lower scores to both arg3 and arg4 than the arguments in the cycle, while GPPL more intuitively gives a higher score to arg3, which was preferred to arg4. SVM predicts that arg0 and arg1 are preferred over arg3, although arg0 and arg1 are in a cycle so there is no reason to prefer them. GPPL, in contrast, weakly predicts that arg3 is preferred.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 249, |
| "end": 257, |
| "text": "Figure 2", |
| "ref_id": "FIGREF3" |
| }, |
| { |
| "start": 760, |
| "end": 768, |
| "text": "Figure 3", |
| "ref_id": "FIGREF4" |
| }, |
| { |
| "start": 884, |
| "end": 892, |
| "text": "Figure 4", |
| "ref_id": "FIGREF6" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiment 1: Toy Data", |
| "sec_num": "6.3" |
| }, |
| { |
| "text": "The \"double cycle\" scenario contains two paths from arg2 to arg0, via arg1 or arg3, and one conflicting preference arg2 \u227b arg0. GPPL scores the arguments as if the single conflicting preference, arg2 \u227b arg0, is less important than the two parallel paths from arg2 to arg0. In contrast, PageRank gives high scores to both arg0 and arg2. The classifications by GPPL and SVM are similar, but GPPL produces more uncertain predictions than in the first scenario due to the conflict.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment 1: Toy Data", |
| "sec_num": "6.3" |
| }, |
| { |
| "text": "Finally, Figure 3d shows the addition of 9 undecided labels to the \"no cycle\" scenario, indicated by undirected edges in Figure 2 , to simulate multiple annotators viewing the pair without being able to choose the most convincing argument. The SVM and PageRank are unaffected as they cannot be trained using the undecided labels. However, the GPPL classifications are less confident and the difference in GPPL scores between arg0 and the other arguments decreases, since GPPL gives the edge from arg2 to arg0 less weight.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 9, |
| "end": 18, |
| "text": "Figure 3d", |
| "ref_id": "FIGREF4" |
| }, |
| { |
| "start": 121, |
| "end": 129, |
| "text": "Figure 2", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiment 1: Toy Data", |
| "sec_num": "6.3" |
| }, |
| { |
| "text": "In conclusion, GPPL appears to resolve conflicts in the preference graphs more intuitively than PageRank, which was designed to rank web pages by importance rather than preference. In contrast to SVM, GPPL is able to account for cycles and undecided labels to soften its predictions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment 1: Toy Data", |
| "sec_num": "6.3" |
| }, |
| { |
| "text": "We analyze empirically the scalability of the proposed SVI method for GPPL using the UKPConvA-rgStrict dataset. Figure 6 shows the effect of varying the number of inducing points, M , on the overall runtime and accuracy of the method. The accuracy increases quickly with M , and flattens out, suggesting there is little benefit to increasing M further on this dataset. The runtimes increase with M , and are much longer with 32, 310 features than with 300 features. The difference is due to the cost of computing the kernel, which is linear in M , With only 300 features, the Figure 6b runtime appears polynomial, reflecting the O(M 3 ) term in the inference procedure. We tested GPPL with both the SVI algorithm, with M = 100 and P n = 200, and variational inference without inducing points or stochastic updates (labeled \"no SVI\") with different sizes of training dataset subsampled from UKPConvArgStrict. The results are shown in Figure 5a . For GPPL with SVI, the runtime increases very little with dataset size, while the runtime with \"no SVI\" increases polynomially with training set size (both N and P ). At N = 100, the number of inducing points is M = N but the SVI algorithm is still faster due to the stochastic updates with P n = 200 P pairs. Figure 5b shows the effect of the number of features, D, on runtimes. Runtimes for GPPL increase by a large amount with D = 32, 310, because the SVI method computes the kernel matrix, K mm , with computational complexity O(D). While D is small, other costs dominate. We show runtimes using the MLII optimization procedure with GPPL in Figure 5c . Owing to the long computation times required, the procedure was limited to a maximum of 25 iterations and did not terminate in fewer than 25 in any of the test runs. This creates a similar pattern to Figure 5b (approximately multiples of 50).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 112, |
| "end": 120, |
| "text": "Figure 6", |
| "ref_id": "FIGREF8" |
| }, |
| { |
| "start": 576, |
| "end": 585, |
| "text": "Figure 6b", |
| "ref_id": "FIGREF8" |
| }, |
| { |
| "start": 933, |
| "end": 942, |
| "text": "Figure 5a", |
| "ref_id": "FIGREF7" |
| }, |
| { |
| "start": 1255, |
| "end": 1264, |
| "text": "Figure 5b", |
| "ref_id": "FIGREF7" |
| }, |
| { |
| "start": 1590, |
| "end": 1600, |
| "text": "Figure 5c", |
| "ref_id": "FIGREF7" |
| }, |
| { |
| "start": 1803, |
| "end": 1813, |
| "text": "Figure 5b", |
| "ref_id": "FIGREF7" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiment 2: Scalability", |
| "sec_num": "6.4" |
| }, |
| { |
| "text": "We include runtimes for SVM and BiLSTM in Figures 5a and 5c to show their runtime patterns, but note that the runtimes reflect differences in implementations and system hardware. Both SVM and GPPL were run on an Intel i7 quad-core desktop. For SVM we used LibSVM version 3.2, which could be sped up if probability estimates were not required. BiLSTM was run with Theano 0.7 2 on an Nvidia Tesla P100 GPU. We can see in Figure 5c that the runtime for BiLSTM does not appear to increase due to the number of features, while that of SVM increases sharply with 32, 310 features. In Figure 5a , we observe the SVM runtimes increase polynomially with training set size.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 42, |
| "end": 59, |
| "text": "Figures 5a and 5c", |
| "ref_id": "FIGREF7" |
| }, |
| { |
| "start": 419, |
| "end": 429, |
| "text": "Figure 5c", |
| "ref_id": "FIGREF7" |
| }, |
| { |
| "start": 579, |
| "end": 588, |
| "text": "Figure 5a", |
| "ref_id": "FIGREF7" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiment 2: Scalability", |
| "sec_num": "6.4" |
| }, |
| { |
| "text": "We compare classification performance on UKP-ConvArgStrict and ranking performance on UKP-ConvArgRank. The results in Table 2 show that when using ling features, GPPL produces similar accuracy and improves the area under the ROC curve (AUC) by .02 and cross entropy error (CEE) by .01. AUC quantifies how well the predicted probabilities separate the classes, while CEE quantifies the usefulness of the probabilities output by each method. Much larger improvements can be seen in the ranking metrics. When GPPL is run with GloVe, it performs worse than BiLSTM for classification but improves the ranking metrics. Using a combination of features improves all methods, suggesting that embeddings and linguistic features contain complementary information. This improvement is statistically significant (p .01 using two-tailed Wilcoxon signed-rank test) for SVM with all metrics except accuracy, for BiLSTM with AUC only, and for GPPL medi. with Pearson correlation only.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 118, |
| "end": 125, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiment 3: UKPConvArgStrict and UKPConvArgRank", |
| "sec_num": "6.5" |
| }, |
| { |
| "text": "Optimizing the length-scale using MLII improves classification accuracy by 1% over the median heuristic, and significantly improves accuracy (p = .043) and AUC (p = .013) over the previous stateof-the-art, SVM ling. However, the cost of these im- provements is that each fold required around 2 hours to compute instead of approximately 10 minutes on the same machine (Intel i7 quad-core desktop) using the median heuristic. The differences in all ranking metrics between GPPL opt. and SVM ling + GloVe are statistically significant, with p = .029 for Pearson's r and p .01 for both Spearman's \u03c1 and Kendall's \u03c4 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment 3: UKPConvArgStrict and UKPConvArgRank", |
| "sec_num": "6.5" |
| }, |
| { |
| "text": "GPC produces the best results on the classification task (p < .01 for all metrics compared to all other methods), indicating the benefits of a Bayesian approach over SVM and BiLSTM. However, unlike GPPL, GPC cannot be used to rank the arguments. The results also show that PL+SVR does not reach the same performance as GPPL, suggesting that GPPL may benefit from the Bayesian integration of a GP with the preference likelihood.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment 3: UKPConvArgStrict and UKPConvArgRank", |
| "sec_num": "6.5" |
| }, |
| { |
| "text": "We use UKPConvArgCrowdSample to introduce noisy data and conflicting pairwise labels to both the classification and regression tasks, to test the hypothesis that GPPL would best handle unreliable crowdsourced data. The evaluation uses gold labels from UKPConvArgStrict and UKPConvAr-gRank for the test set. The results in Table 3 show that all methods perform worse compared to Experiment 3 due to the presence of errors in the pairwise labels. Here, GPPL produces the best classification accuracy and cross-entropy error (significant with p .01 compared to all other methods except accuracy compared to GP+SVR, for which p = .045), while GPC has the highest AUC (p .01 compared to all except GP+SVR, which was not significant). Compared to UKPConvArgStrict, the clas-sification performance of GPC, SVM and BiLSTM decreased more than that of GPPL. These methods lack a mechanism to resolve conflicts in the preference graph, unlike GPPL and PL+SVR, which handle conflicts through the preference likelihood. PL+SVR again performs worse than GPPL on classification metrics, although its ranking performance is comparable. For ranking, GPPL again outperforms SVM and BiLSTM in all metrics (significant with p .01 in all cases except for SVM with Pearson's correlation).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 322, |
| "end": 329, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiment 4: Conflicting and Noisy Data", |
| "sec_num": "6.6" |
| }, |
| { |
| "text": "In this experiment, we hypothesized that GPPL provides more meaningful confidence estimates than SVM or BiLSTM, which can be used to facilitate active learning in scenarios where labeled training data is expensive or initially unavailable. To test this hypothesis, we simulate an active learning scenario, in which an agent iteratively learns a model for each fold. Initially, 2 pairs are chosen at random, then used to train the classifier. The agent then performs uncertainty sampling (Settles, 2010) to select the 2 pairs with the least confident classifications. The labels for these pairs are then added to the training set and used to re-train the model. We repeated the process until 400 labels had been sampled.", |
| "cite_spans": [ |
| { |
| "start": 487, |
| "end": 502, |
| "text": "(Settles, 2010)", |
| "ref_id": "BIBREF37" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment 5: Active Learning", |
| "sec_num": "6.7" |
| }, |
| { |
| "text": "The result is plotted in Figure 7 , showing that GPPL reaches a mean accuracy of 70% with only 100 labels, while SVM and BiLSTM do not reach the same performance given 400 labels. After 100 labels, the performance of BiLSTM decreases. It has previously been shown Guyon et al., 2011; Settles, 2010 ) that uncertainty sampling sometimes causes accuracy to decrease. If the model overfits to a small dataset, it can mis-classify some data points with high confidence so that they are not selected and corrected by uncertainty sampling. The larger number of parameters in the BiLSTM may make it more prone to overfitting with small datasets than SVM or GPPL. The Bayesian approach of GPPL aims to further reduce overfitting by accounting for parameter uncertainty. The results suggest that GPPL may be more suitable than the alternatives in cold-start scenarios with small amounts of labeled data. ",
| "cite_spans": [ |
| { |
| "start": 264, |
| "end": 283, |
| "text": "Guyon et al., 2011;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 284, |
| "end": 297, |
| "text": "Settles, 2010", |
| "ref_id": "BIBREF37" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 25, |
| "end": 33, |
| "text": "Figure 7", |
| "ref_id": "FIGREF9" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiment 5: Active Learning", |
| "sec_num": "6.7" |
| }, |
| { |
| "text": "We now examine the length-scales learned by optimizing GPPL using MLII to identify informative features. A larger length-scale causes greater smoothing, implying that the feature is less relevant when predicting the convincingness function than a feature with a small length-scale. Figure 8 shows the distribution of length-scales for each category of ling+GloVe features, averaged over the folds in UKPConvArgStrict where MLII optimization improved accuracy by 3%. The length-scales were normalized by dividing by their median heuristic values, which were their initial values before optimization. The widest distributions of length-scales are for the mean word embeddings and the \"other\" category. A very large number of features have lengthscales close to 1, which may mean that they are weakly informative, as their length-scales have not been increased, or that there was insufficient data or time to learn their length-scales. To limit computation time, the optimization algorithm was restricted to 25 iterations, so may only have fully optimized features with larger gradients, leaving other features with normalized length-scales close to 1. Table 4 shows features with length-scales < 0.99, of which there are two production rule features and 18 POS-n-gram features, suggesting that the latter may capture more relevant aspects of grammar for convincingness. For n-grams, the relationship to convincingness may be topic-specific, hence they are not identified as important when the model is trained on 31 different topics. The fact that MLII did not substantially shorten the length-scales for ngrams and POS n-grams corresponds to previous re-sults (Persing and Ng, 2017) , which found these feature sets less informative than other argumentrelated feature sets. Table 4 also presents a breakdown of the \"other\" features into sentiment, ratio, count and NER features. 
The shortest length-scales are for sentiment features, pointing to a possible link between argumentation quality and sentiment. However, \"Very-Positive\" was the feature with the largest lengthscale, either because the median was a poor heuristic in this case or because the feature was uninformative, perhaps because sarcastic statements can be confused with highly positive sentiment. The short length-scale for the \"words > 6 letters\" ratio suggest that some surface features may be informative, despite previous work (Wei et al., 2016a) finding a set of surface features less informative than other feature sets. In this case, longer words may relate to more sophisticated and convincing arguments. ", |
| "cite_spans": [ |
| { |
| "start": 1659, |
| "end": 1681, |
| "text": "(Persing and Ng, 2017)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 2398, |
| "end": 2417, |
| "text": "(Wei et al., 2016a)", |
| "ref_id": "BIBREF46" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 282, |
| "end": 290, |
| "text": "Figure 8", |
| "ref_id": "FIGREF11" |
| }, |
| { |
| "start": 1150, |
| "end": 1157, |
| "text": "Table 4", |
| "ref_id": "TABREF6" |
| }, |
| { |
| "start": 1773, |
| "end": 1780, |
| "text": "Table 4", |
| "ref_id": "TABREF6" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Relevant Feature Determination", |
| "sec_num": "6.8" |
| }, |
| { |
| "text": "We compared the errors when using GPPL opt. with mean GloVe embeddings and with linguistic features. We manually inspected the 25 arguments most frequently mis-classified by GPPL ling and correctly classified by GPPL GloVe. We found that GPPL ling mistakenly marked several arguments as less convincing when they contained grammar and spelling errors but otherwise made a logical point. In contrast, arguments that did not strongly take a side and did not contain language errors were often marked mistakenly as more convincing. We also examined the 25 arguments most frequently misclassified by GPPL GloVe but not by GPPL ling. Of the arguments that GPPL GloVe incorrectly marked as more convincing, 10 contained multiple exclamation marks and all-caps sentences. Other failures were very short arguments and under- rating arguments containing the term 'rape'. The analysis suggests that the different feature sets identify different aspects of convincingness.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Error Analysis", |
| "sec_num": "6.9" |
| }, |
| { |
| "text": "To investigate the differences between our best approach, GPPL opt. ling + GloVe, and the previous best performer, SVM ling, we manually examined 40 randomly chosen false classifications, where one of either ling + GloVe or SVM was correct and the other was incorrect. We found that both SVM and GPPL falsely classified arguments that were either very short or long and complex, suggesting deeper semantic or structural understanding of the argument may be required. However, SVM also made mistakes where the arguments contained few verbs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Error Analysis", |
| "sec_num": "6.9" |
| }, |
| { |
| "text": "We also compared the rankings produced by GPPL opt. (ling+GloVe), and SVM on UKPCon-vArgRank by examining the 20 largest deviations from the gold standard rank for each method. Arguments underrated by SVM and not GPPL often contained exclamation marks or common spelling errors (likely due to unigram or bigram features). GPPL underrated short arguments with the ngrams \"I think\", \"why?\", and \"don't know\", which were used as part of a rhetorical question rather than to state that the author was uncertain or uninformed. These cases may not be distinguishable by a GP given only ling + GloVe features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Error Analysis", |
| "sec_num": "6.9" |
| }, |
| { |
| "text": "An expected advantage of GPPL is that it provides more meaningful uncertainty estimates for tasks such as active learning. We examined whether erroneous classifications correspond to more uncertain predictions with GPPL ling and SVM ling. For UKPConvArgStrict, the mean Shannon entropy of the pairwise predictions from GPPL was .129 for correct predictions and 2.443 for errors, while for SVM, the mean Shannon entropy was .188 for correct predictions and 1.583 for incorrect. With both methods, more uncertain (higher entropy) predictions correlate with more errors, but the more extreme values for GPPL suggest that its output probabilities more accurately reflect uncertainty than those produced by the SVM.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Error Analysis", |
| "sec_num": "6.9" |
| }, |
| { |
| "text": "We presented a novel Bayesian approach to predicting argument convincingness from pairwise labels using Gaussian process preference learning (GPPL). Using recent advances in approximate inference, we developed a scalable algorithm for GPPL that is suitable for large NLP datasets. Our experiments demonstrated that our method significantly outperforms the state-of-the-art on a benchmark dataset for argument convincingness, particularly when noisy and conflicting pairwise labels are used in training. Active learning experiments showed that GPPL is an effective model for cold-start situations and that the convincingness of Internet arguments can be predicted reasonably well given only a small number of samples. The results also showed that linguistic features and word embeddings provide complementary information, and that GPPL can be used to automatically identify relevant features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions and Future Work", |
| "sec_num": "7" |
| }, |
| { |
| "text": "Future work will evaluate our approach on other NLP tasks where reliable classifications may be difficult to obtain, such as learning to classify text from implicit user feedback (Joachims, 2002) . We also plan to investigate training the GP using absolute scores in combination with pairwise labels.", |
| "cite_spans": [ |
| { |
| "start": 179, |
| "end": 195, |
| "text": "(Joachims, 2002)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions and Future Work", |
| "sec_num": "7" |
| }, |
| { |
| "text": "https://github.com/ukplab/ tacl2018-preference-convincing", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "http://deeplearning.net/software/ theano/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work has been supported by the German Federal Ministry of Education and Research (BMBF) under the promotional reference 01UG1416B (CEDIFOR). It also received funding from the European Unions Horizon 2020 research and innovation programme (H2020-EINFRA-2014-2) under grant agreement No. 654021 (Open-MinTeD). It reflects only the authors views and the EU is not liable for any use that may be made of the information contained therein. We would like to thank the TACL editors and reviewers for their effort and the valuable feedback we received from them.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "k-means++: the advantages of careful seeding", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Arthur", |
| "suffix": "" |
| }, |
| { |
| "first": "Sergei", |
| "middle": [], |
| "last": "Vassilvitskii", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the Eighteenth Annual ACM-SIAM Symposium on Discrete Algorithms", |
| "volume": "", |
| "issue": "", |
| "pages": "1027--1035", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Arthur and Sergei Vassilvitskii. 2007. k-means++: the advantages of careful seeding. In Proceedings of the Eighteenth Annual ACM-SIAM Symposium on Dis- crete Algorithms, pages 1027-1035. Society for Indus- trial and Applied Mathematics.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Joint emotion analysis via multi-task Gaussian processes", |
| "authors": [ |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Beck", |
| "suffix": "" |
| }, |
| { |
| "first": "Trevor", |
| "middle": [], |
| "last": "Cohn", |
| "suffix": "" |
| }, |
| { |
| "first": "Lucia", |
| "middle": [], |
| "last": "Specia", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1798--1803", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Daniel Beck, Trevor Cohn, and Lucia Specia. 2014. Joint emotion analysis via multi-task Gaussian processes. In Proceedings of the 2014 Conference on Empiri- cal Methods in Natural Language Processing, pages 1798-1803. ACL.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Latent Dirichlet allocation", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "David", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Blei", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Andrew", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael I Jordan", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "3", |
| "issue": "", |
| "pages": "993--1022", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David M Blei, Andrew Y Ng, and Michael I Jordan. 2003. Latent Dirichlet allocation. Journal of Machine Learning Research, 3(Jan):993-1022.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "The fake, the flimsy, and the fallacious: demarcating arguments in real life", |
| "authors": [ |
| { |
| "first": "Maarten", |
| "middle": [], |
| "last": "Boudry", |
| "suffix": "" |
| }, |
| { |
| "first": "Fabio", |
| "middle": [], |
| "last": "Paglieri", |
| "suffix": "" |
| }, |
| { |
| "first": "Massimo", |
| "middle": [], |
| "last": "Pigliucci", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Argumentation", |
| "volume": "29", |
| "issue": "4", |
| "pages": "431--456", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Maarten Boudry, Fabio Paglieri, and Massimo Pigli- ucci. 2015. The fake, the flimsy, and the fallacious: demarcating arguments in real life. Argumentation, 29(4):431-456.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Rank analysis of incomplete block designs: I. the method of paired comparisons", |
| "authors": [ |
| { |
| "first": "Allan", |
| "middle": [], |
| "last": "Ralph", |
| "suffix": "" |
| }, |
| { |
| "first": "Milton", |
| "middle": [ |
| "E" |
| ], |
| "last": "Bradley", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Terry", |
| "suffix": "" |
| } |
| ], |
| "year": 1952, |
| "venue": "Biometrika", |
| "volume": "39", |
| "issue": "3/4", |
| "pages": "324--345", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ralph Allan Bradley and Milton E Terry. 1952. Rank analysis of incomplete block designs: I. the method of paired comparisons. Biometrika, 39(3/4):324-345.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Baseline methods for active learning", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Gavin", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Cawley", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the Active Learning and Experimental Design Workshop in Conjunction with AISTATS 2010", |
| "volume": "", |
| "issue": "", |
| "pages": "47--57", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gavin C Cawley. 2011. Baseline methods for active learning. In Proceedings of the Active Learning and Experimental Design Workshop in Conjunction with AISTATS 2010, pages 47-57.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Joint inference over a lightly supervised information extraction pipeline: Towards event coreference resolution for resource-scarce languages", |
| "authors": [ |
| { |
| "first": "Chen", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Vincent", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the Thirtieth AAAI Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "2913--2920", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chen Chen and Vincent Ng. 2016. Joint inference over a lightly supervised information extraction pipeline: To- wards event coreference resolution for resource-scarce languages. In Proceedings of the Thirtieth AAAI Con- ference on Artificial Intelligence, pages 2913-2920.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Preference learning with Gaussian processes", |
| "authors": [ |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Chu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zoubin", |
| "middle": [], |
| "last": "Ghahramani", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the 22nd International Conference on Machine learning", |
| "volume": "", |
| "issue": "", |
| "pages": "137--144", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wei Chu and Zoubin Ghahramani. 2005. Preference learning with Gaussian processes. In Proceedings of the 22nd International Conference on Machine learn- ing, pages 137-144. ACM.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Modelling annotator bias with multi-task Gaussian processes: An application to machine translation quality estimation", |
| "authors": [ |
| { |
| "first": "Trevor", |
| "middle": [], |
| "last": "Cohn", |
| "suffix": "" |
| }, |
| { |
| "first": "Lucia", |
| "middle": [], |
| "last": "Specia", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "32--42", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Trevor Cohn and Lucia Specia. 2013. Modelling anno- tator bias with multi-task Gaussian processes: An ap- plication to machine translation quality estimation. In Proceedings of the 51st Annual Meeting of the Associ- ation for Computational Linguistics (Volume 1: Long Papers), pages 32-42.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Natural language processing (almost) from scratch", |
| "authors": [ |
| { |
| "first": "Ronan", |
| "middle": [], |
| "last": "Collobert", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| }, |
| { |
| "first": "L\u00e9on", |
| "middle": [], |
| "last": "Bottou", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Karlen", |
| "suffix": "" |
| }, |
| { |
| "first": "Koray", |
| "middle": [], |
| "last": "Kavukcuoglu", |
| "suffix": "" |
| }, |
| { |
| "first": "Pavel", |
| "middle": [], |
| "last": "Kuksa", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "12", |
| "issue": "", |
| "pages": "2493--2537", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ronan Collobert, Jason Weston, L\u00e9on Bottou, Michael Karlen, Koray Kavukcuoglu, and Pavel Kuksa. 2011. Natural language processing (almost) from scratch. Journal of Machine Learning Research, 12(Aug):2493-2537.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Semantic annotation aggregation with conditional crowdsourcing models and word embeddings", |
| "authors": [ |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Felt", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [ |
| "K" |
| ], |
| "last": "Ringger", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [ |
| "D" |
| ], |
| "last": "Seppi", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of COLING 2016, the 26th International Conference on Computational Linguistics: Technical Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "1787--1796", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Paul Felt, Eric K. Ringger, and Kevin D. Seppi. 2016. Semantic annotation aggregation with conditional crowdsourcing models and word embeddings. In Pro- ceedings of COLING 2016, the 26th International Conference on Computational Linguistics: Technical Papers, pages 1787-1796.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Optimal kernel choice for large-scale two-sample tests", |
| "authors": [ |
| { |
| "first": "Arthur", |
| "middle": [], |
| "last": "Gretton", |
| "suffix": "" |
| }, |
| { |
| "first": "Dino", |
| "middle": [], |
| "last": "Sejdinovic", |
| "suffix": "" |
| }, |
| { |
| "first": "Heiko", |
| "middle": [], |
| "last": "Strathmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Sivaraman", |
| "middle": [], |
| "last": "Balakrishnan", |
| "suffix": "" |
| }, |
| { |
| "first": "Massimiliano", |
| "middle": [], |
| "last": "Pontil", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenji", |
| "middle": [], |
| "last": "Fukumizu", |
| "suffix": "" |
| }, |
| { |
| "first": "Bharath", |
| "middle": [ |
| "K" |
| ], |
| "last": "Sriperumbudur", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "1205--1213", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Arthur Gretton, Dino Sejdinovic, Heiko Strathmann, Sivaraman Balakrishnan, Massimiliano Pontil, Kenji Fukumizu, and Bharath K. Sriperumbudur. 2012. Op- timal kernel choice for large-scale two-sample tests. In Advances in Neural Information Processing Systems, pages 1205-1213.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Results of the active learning challenge", |
| "authors": [ |
| { |
| "first": "Isabelle", |
| "middle": [], |
| "last": "Guyon", |
| "suffix": "" |
| }, |
| { |
| "first": "Gavin", |
| "middle": [], |
| "last": "Cawley", |
| "suffix": "" |
| }, |
| { |
| "first": "Gideon", |
| "middle": [], |
| "last": "Dror", |
| "suffix": "" |
| }, |
| { |
| "first": "Vincent", |
| "middle": [], |
| "last": "Lemaire", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the Active Learning and Experimental Design Workshop in Conjunction with AIS-TATS 2010", |
| "volume": "", |
| "issue": "", |
| "pages": "19--45", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Isabelle Guyon, Gavin Cawley, Gideon Dror, and Vincent Lemaire. 2011. Results of the active learning chal- lenge. In Proceedings of the Active Learning and Ex- perimental Design Workshop in Conjunction with AIS- TATS 2010, pages 19-45.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Which argument is more convincing? Analyzing and predicting convincingness of Web arguments using bidirectional LSTM", |
| "authors": [ |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Habernal", |
| "suffix": "" |
| }, |
| { |
| "first": "Iryna", |
| "middle": [], |
| "last": "Gurevych", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1589--1599", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ivan Habernal and Iryna Gurevych. 2016. Which argu- ment is more convincing? Analyzing and predicting convincingness of Web arguments using bidirectional LSTM. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Vol- ume 1: Long Papers), pages 1589-1599.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Scalable Variational Gaussian Process Classification", |
| "authors": [ |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Hensman", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [ |
| "G" |
| ], |
| "last": "De", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Matthews", |
| "suffix": "" |
| }, |
| { |
| "first": "Zoubin", |
| "middle": [], |
| "last": "Ghahramani", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the Eighteenth International Conference on Artificial Intelligence and Statistics", |
| "volume": "", |
| "issue": "", |
| "pages": "351--360", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "James Hensman, Alexander G. de G. Matthews, and Zoubin Ghahramani. 2015. Scalable Variational Gaussian Process Classification. In Proceedings of the Eighteenth International Conference on Artificial In- telligence and Statistics, pages 351-360.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Stochastic variational inference", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [ |
| "D" |
| ], |
| "last": "Hoffman", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "M" |
| ], |
| "last": "Blei", |
| "suffix": "" |
| }, |
| { |
| "first": "Chong", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [ |
| "William" |
| ], |
| "last": "Paisley", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "14", |
| "issue": "1", |
| "pages": "1303--1347", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew D. Hoffman, David M. Blei, Chong Wang, and John William Paisley. 2013. Stochastic variational inference. Journal of Machine Learning Research, 14(1):1303-1347.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Learning whom to trust with MACE", |
| "authors": [ |
| { |
| "first": "Dirk", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| }, |
| { |
| "first": "Taylor", |
| "middle": [], |
| "last": "Berg-Kirkpatrick", |
| "suffix": "" |
| }, |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Eduard", |
| "middle": [ |
| "H" |
| ], |
| "last": "Hovy", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of NAACL-HLT 2013", |
| "volume": "", |
| "issue": "", |
| "pages": "1120--1130", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dirk Hovy, Taylor Berg-Kirkpatrick, Ashish Vaswani, and Eduard H. Hovy. 2013. Learning whom to trust with MACE. In Proceedings of NAACL-HLT 2013, pages 1120-1130. ACL.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Optimizing search engines using clickthrough data", |
| "authors": [ |
| { |
| "first": "Thorsten", |
| "middle": [], |
| "last": "Joachims", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the Eighth ACM SIGKDD International Conference on Knowledge Discovery and Data Mining", |
| "volume": "", |
| "issue": "", |
| "pages": "133--142", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thorsten Joachims. 2002. Optimizing search engines us- ing clickthrough data. In Proceedings of the Eighth ACM SIGKDD International Conference on Knowl- edge Discovery and Data Mining, pages 133-142. ACM.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Rank correlation methods", |
| "authors": [ |
| { |
| "first": "Maurice", |
| "middle": [ |
| "George" |
| ], |
| "last": "Kendall", |
| "suffix": "" |
| } |
| ], |
| "year": 1948, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Maurice George Kendall. 1948. Rank correlation meth- ods. Griffin.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Siamese CBOW: Optimizing word embeddings for sentence representations", |
| "authors": [ |
| { |
| "first": "Tom", |
| "middle": [], |
| "last": "Kenter", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexey", |
| "middle": [], |
| "last": "Borisov", |
| "suffix": "" |
| }, |
| { |
| "first": "Maarten", |
| "middle": [], |
| "last": "De Rijke", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the The 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "941--951", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tom Kenter, Alexey Borisov, and Maarten de Rijke. 2016. Siamese CBOW: Optimizing word embeddings for sentence representations. In Proceedings of the The 54th Annual Meeting of the Association for Com- putational Linguistics (Volume 1: Long Papers), pages 941-951.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "A Bayesian approach to argument-based reasoning for attack estimation", |
| "authors": [ |
| { |
| "first": "Hiroyuki", |
| "middle": [], |
| "last": "Kido", |
| "suffix": "" |
| }, |
| { |
| "first": "Keishi", |
| "middle": [], |
| "last": "Okamoto", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "249--255", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hiroyuki Kido and Keishi Okamoto. 2017. A Bayesian approach to argument-based reasoning for attack es- timation. In Proceedings of the Twenty-Sixth Inter- national Joint Conference on Artificial Intelligence, pages 249-255.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Preference uncertainty, preference refinement and paired comparison experiments", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [], |
| "last": "David", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Kingsley", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Thomas", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Brown", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Land Economics", |
| "volume": "86", |
| "issue": "3", |
| "pages": "530--544", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David C. Kingsley and Thomas C. Brown. 2010. Preference uncertainty, preference refinement and paired comparison experiments. Land Economics, 86(3):530-544.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Skip-thought vectors", |
| "authors": [ |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Kiros", |
| "suffix": "" |
| }, |
| { |
| "first": "Yukun", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruslan", |
| "middle": [ |
| "R" |
| ], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Zemel", |
| "suffix": "" |
| }, |
| { |
| "first": "Raquel", |
| "middle": [], |
| "last": "Urtasun", |
| "suffix": "" |
| }, |
| { |
| "first": "Antonio", |
| "middle": [], |
| "last": "Torralba", |
| "suffix": "" |
| }, |
| { |
| "first": "Sanja", |
| "middle": [], |
| "last": "Fidler", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "3294--3302", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ryan Kiros, Yukun Zhu, Ruslan R. Salakhutdinov, Richard Zemel, Raquel Urtasun, Antonio Torralba, and Sanja Fidler. 2015. Skip-thought vectors. In Advances in Neural Information Processing Systems, pages 3294-3302.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Predicting and characterising user impact on Twitter", |
| "authors": [ |
| { |
| "first": "Vasileios", |
| "middle": [], |
| "last": "Lampos", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikolaos", |
| "middle": [], |
| "last": "Aletras", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Preo\u0163iuc-Pietro", |
| "suffix": "" |
| }, |
| { |
| "first": "Trevor", |
| "middle": [], |
| "last": "Cohn", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 14th Conference of the European Chapter of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "405--413", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vasileios Lampos, Nikolaos Aletras, Daniel Preo\u0163iuc- Pietro, and Trevor Cohn. 2014. Predicting and charac- terising user impact on Twitter. In Proceedings of the 14th Conference of the European Chapter of the Asso- ciation for Computational Linguistics, pages 405-413.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Learning mallows models with pairwise preferences", |
| "authors": [ |
| { |
| "first": "Tyler", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "Craig", |
| "middle": [], |
| "last": "Boutilier", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 28th International Conference on Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "145--152", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tyler Lu and Craig Boutilier. 2011. Learning mallows models with pairwise preferences. In Proceedings of the 28th International Conference on Machine Learn- ing, pages 145-152.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "On the possible psychophysical laws", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "Duncan", |
| "middle": [], |
| "last": "Luce", |
| "suffix": "" |
| } |
| ], |
| "year": 1959, |
| "venue": "Psychological Review", |
| "volume": "66", |
| "issue": "2", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "R. Duncan Luce. 1959. On the possible psychophysical laws. Psychological Review, 66(2):81.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Argument strength is in the eye of the beholder: Audience effects in persuasion", |
| "authors": [ |
| { |
| "first": "Stephanie", |
| "middle": [], |
| "last": "Lukin", |
| "suffix": "" |
| }, |
| { |
| "first": "Pranav", |
| "middle": [], |
| "last": "Anand", |
| "suffix": "" |
| }, |
| { |
| "first": "Marilyn", |
| "middle": [], |
| "last": "Walker", |
| "suffix": "" |
| }, |
| { |
| "first": "Steve", |
| "middle": [], |
| "last": "Whittaker", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "742--753", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stephanie Lukin, Pranav Anand, Marilyn Walker, and Steve Whittaker. 2017. Argument strength is in the eye of the beholder: Audience effects in persuasion. In Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguis- tics, pages 742-753.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Non-null ranking models. i", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Colin", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mallows", |
| "suffix": "" |
| } |
| ], |
| "year": 1957, |
| "venue": "Biometrika", |
| "volume": "44", |
| "issue": "1", |
| "pages": "114--130", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Colin L Mallows. 1957. Non-null ranking models. i. Biometrika, 44(1/2):114-130.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Why do humans reason? Arguments for an argumentative theory. Behavioral and Brain Sciences", |
| "authors": [ |
| { |
| "first": "Hugo", |
| "middle": [], |
| "last": "Mercier", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Sperber", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "", |
| "volume": "34", |
| "issue": "", |
| "pages": "57--74", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hugo Mercier and Dan Sperber. 2011. Why do humans reason? Arguments for an argumentative theory. Be- havioral and Brain Sciences, 34(2):57-74.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "A reinforcement learning approach to improve the argument selection effectiveness in argumentation-based negotiation", |
| "authors": [ |
| { |
| "first": "Ariel", |
| "middle": [], |
| "last": "Monteserin", |
| "suffix": "" |
| }, |
| { |
| "first": "Anal\u00eda", |
| "middle": [], |
| "last": "Amandi", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Expert Systems with Applications", |
| "volume": "40", |
| "issue": "6", |
| "pages": "2182--2188", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ariel Monteserin and Anal\u00eda Amandi. 2013. A rein- forcement learning approach to improve the argument selection effectiveness in argumentation-based negoti- ation. Expert Systems with Applications, 40(6):2182- 2188.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Remarks on the method of paired comparisons: I. The least squares solution assuming equal standard deviations and equal correlations", |
| "authors": [ |
| { |
| "first": "Frederick", |
| "middle": [], |
| "last": "Mosteller", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Selected Papers of Frederick Mosteller", |
| "volume": "", |
| "issue": "", |
| "pages": "157--162", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Frederick Mosteller. 2006. Remarks on the method of paired comparisons: I. The least squares solution as- suming equal standard deviations and equal correla- tions. In Selected Papers of Frederick Mosteller, pages 157-162. Springer.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Approximations for binary Gaussian process classification", |
| "authors": [ |
| { |
| "first": "Hannes", |
| "middle": [], |
| "last": "Nickisch", |
| "suffix": "" |
| }, |
| { |
| "first": "Carl", |
| "middle": [ |
| "Edward" |
| ], |
| "last": "Rasmussen", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "9", |
| "issue": "", |
| "pages": "2035--2078", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hannes Nickisch and Carl Edward Rasmussen. 2008. Approximations for binary Gaussian process classi- fication. Journal of Machine Learning Research, 9(Oct):2035-2078.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Why can't you convince me? Modeling weaknesses in unpersuasive arguments", |
| "authors": [ |
| { |
| "first": "Isaac", |
| "middle": [], |
| "last": "Persing", |
| "suffix": "" |
| }, |
| { |
| "first": "Vincent", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 26th International Joint Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "4082--4088", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Isaac Persing and Vincent Ng. 2017. Why can't you convince me? Modeling weaknesses in unpersua- sive arguments. In Proceedings of the 26th Inter- national Joint Conference on Artificial Intelligence, pages 4082-4088.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "The analysis of permutations", |
| "authors": [ |
| { |
| "first": "Robin", |
| "middle": [ |
| "L" |
| ], |
| "last": "Plackett", |
| "suffix": "" |
| } |
| ], |
| "year": 1975, |
| "venue": "Applied Statistics", |
| "volume": "", |
| "issue": "", |
| "pages": "193--202", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Robin L. Plackett. 1975. The analysis of permutations. Applied Statistics, pages 193-202.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Gaussian processes for machine learning", |
| "authors": [ |
| { |
| "first": "Carl", |
| "middle": [ |
| "E" |
| ], |
| "last": "Rasmussen", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "K I" |
| ], |
| "last": "Williams", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "", |
| "volume": "38", |
| "issue": "", |
| "pages": "715--719", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Carl E. Rasmussen and Christopher K. I. Williams. 2006. Gaussian processes for machine learning. The MIT Press, Cambridge, MA, USA, 38:715-719.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Determining intent using hard/soft data and Gaussian process classifiers", |
| "authors": [ |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Reece", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Roberts", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Nicholson", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Lloyd", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 14th International Conference on Information Fusion", |
| "volume": "", |
| "issue": "", |
| "pages": "1--8", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Steven Reece, Stephen Roberts, David Nicholson, and Chris Lloyd. 2011. Determining intent using hard/soft data and Gaussian process classifiers. In Proceedings of the 14th International Conference on Information Fusion, pages 1-8. IEEE.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Providing arguments in discussions on the basis of the prediction of human argumentative behavior", |
| "authors": [ |
| { |
| "first": "Ariel", |
| "middle": [], |
| "last": "Rosenfeld", |
| "suffix": "" |
| }, |
| { |
| "first": "Sarit", |
| "middle": [], |
| "last": "Kraus", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "ACM Transactions on Interactive Intelligent Systems (TiiS)", |
| "volume": "6", |
| "issue": "4", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ariel Rosenfeld and Sarit Kraus. 2016. Providing argu- ments in discussions on the basis of the prediction of human argumentative behavior. ACM Transactions on Interactive Intelligent Systems (TiiS), 6(4):30.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Active learning literature survey", |
| "authors": [ |
| { |
| "first": "Burr", |
| "middle": [], |
| "last": "Settles", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "", |
| "volume": "52", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Burr Settles. 2010. Active learning literature survey. University of Wisconsin, Madison, 52(55-66):11.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Language understanding in the wild: Combining crowdsourcing and machine learning", |
| "authors": [ |
| { |
| "first": "Edwin", |
| "middle": [ |
| "D" |
| ], |
| "last": "Simpson", |
| "suffix": "" |
| }, |
| { |
| "first": "Matteo", |
| "middle": [], |
| "last": "Venanzi", |
| "suffix": "" |
| }, |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Reece", |
| "suffix": "" |
| }, |
| { |
| "first": "Pushmeet", |
| "middle": [], |
| "last": "Kohli", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Guiver", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephen", |
| "middle": [ |
| "J" |
| ], |
| "last": "Roberts", |
| "suffix": "" |
| }, |
| { |
| "first": "Nicholas", |
| "middle": [ |
| "R" |
| ], |
| "last": "Jennings", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 24th International Conference on World Wide Web", |
| "volume": "", |
| "issue": "", |
| "pages": "992--1002", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Edwin D. Simpson, Matteo Venanzi, Steven Reece, Push- meet Kohli, John Guiver, Stephen J. Roberts, and Nicholas R. Jennings. 2015. Language understanding in the wild: Combining crowdsourcing and machine learning. In Proceedings of the 24th International Conference on World Wide Web, pages 992-1002.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Dropout: A simple way to prevent neural networks from overfitting", |
| "authors": [ |
| { |
| "first": "Nitish", |
| "middle": [], |
| "last": "Srivastava", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoffrey", |
| "middle": [ |
| "E" |
| ], |
| "last": "Hinton", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Krizhevsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruslan", |
| "middle": [ |
| "R" |
| ], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Journal of machine learning research", |
| "volume": "15", |
| "issue": "1", |
| "pages": "1929--1958", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nitish Srivastava, Geoffrey E. Hinton, Alex Krizhevsky, Ilya Sutskever, and Ruslan R. Salakhutdinov. 2014. Dropout: A simple way to prevent neural networks from overfitting. Journal of machine learning re- search, 15(1):1929-1958.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Extended and unscented Gaussian processes", |
| "authors": [ |
| { |
| "first": "Daniel", |
| "middle": [ |
| "M" |
| ], |
| "last": "Steinberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Edwin", |
| "middle": [ |
| "V" |
| ], |
| "last": "Bonilla", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "1251--1259", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Daniel M. Steinberg and Edwin V. Bonilla. 2014. Ex- tended and unscented Gaussian processes. In Ad- vances in Neural Information Processing Systems, pages 1251-1259.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "Winning arguments: Interaction dynamics and persuasion strategies in good-faith online discussions", |
| "authors": [ |
| { |
| "first": "Chenhao", |
| "middle": [], |
| "last": "Tan", |
| "suffix": "" |
| }, |
| { |
| "first": "Vlad", |
| "middle": [], |
| "last": "Niculae", |
| "suffix": "" |
| }, |
| { |
| "first": "Cristian", |
| "middle": [], |
| "last": "Danescu-Niculescu-Mizil", |
| "suffix": "" |
| }, |
| { |
| "first": "Lillian", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 25th International Conference on World Wide Web", |
| "volume": "", |
| "issue": "", |
| "pages": "613--624", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chenhao Tan, Vlad Niculae, Cristian Danescu- Niculescu-Mizil, and Lillian Lee. 2016. Winning arguments: Interaction dynamics and persuasion strategies in good-faith online discussions. In Pro- ceedings of the 25th International Conference on World Wide Web, pages 613-624.", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "Sharing clusters among related groups: Hierarchical Dirichlet processes", |
| "authors": [ |
| { |
| "first": "Yee", |
| "middle": [ |
| "W" |
| ], |
| "last": "Teh", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [ |
| "I" |
| ], |
| "last": "Jordan", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [ |
| "J" |
| ], |
| "last": "Beal", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "M" |
| ], |
| "last": "Blei", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "1385--1392", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yee W. Teh, Michael I. Jordan, Matthew J. Beal, and David M. Blei. 2005. Sharing clusters among related groups: Hierarchical Dirichlet processes. In Advances in neural information processing systems, pages 1385- 1392.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "A law of comparative judgment", |
| "authors": [ |
| { |
| "first": "Louis", |
| "middle": [ |
| "L" |
| ], |
| "last": "Thurstone", |
| "suffix": "" |
| } |
| ], |
| "year": 1927, |
| "venue": "Psychological review", |
| "volume": "34", |
| "issue": "4", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Louis L Thurstone. 1927. A law of comparative judg- ment. Psychological review, 34(4):273.", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "A Bayesian approach to unsupervised semantic role induction", |
| "authors": [ |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Titov", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandre", |
| "middle": [], |
| "last": "Klementiev", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 13th Conference of the European Chapter of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "12--22", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ivan Titov and Alexandre Klementiev. 2012. A Bayesian approach to unsupervised semantic role induction. In Proceedings of the 13th Conference of the European Chapter of the Association for Computational Linguis- tics, pages 12-22.", |
| "links": null |
| }, |
| "BIBREF45": { |
| "ref_id": "b45", |
| "title": "Argumentation quality assessment: Theory vs. practice", |
| "authors": [ |
| { |
| "first": "Henning", |
| "middle": [], |
| "last": "Wachsmuth", |
| "suffix": "" |
| }, |
| { |
| "first": "Nona", |
| "middle": [], |
| "last": "Naderi", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Habernal", |
| "suffix": "" |
| }, |
| { |
| "first": "Yufang", |
| "middle": [], |
| "last": "Hou", |
| "suffix": "" |
| }, |
| { |
| "first": "Graeme", |
| "middle": [], |
| "last": "Hirst", |
| "suffix": "" |
| }, |
| { |
| "first": "Iryna", |
| "middle": [], |
| "last": "Gurevych", |
| "suffix": "" |
| }, |
| { |
| "first": "Benno", |
| "middle": [], |
| "last": "Stein", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "250--255", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Henning Wachsmuth, Nona Naderi, Ivan Habernal, Yu- fang Hou, Graeme Hirst, Iryna Gurevych, and Benno Stein. 2017. Argumentation quality assessment: The- ory vs. practice. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguis- tics (Volume 2: Short Papers), pages 250-255.", |
| "links": null |
| }, |
| "BIBREF46": { |
| "ref_id": "b46", |
| "title": "Is this post persuasive? Ranking argumentative comments in online forum", |
| "authors": [ |
| { |
| "first": "Zhongyu", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yi", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "195--200", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhongyu Wei, Yang Liu, and Yi Li. 2016a. Is this post persuasive? Ranking argumentative comments in on- line forum. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Vol- ume 2: Short Papers), pages 195-200.", |
| "links": null |
| }, |
| "BIBREF47": { |
| "ref_id": "b47", |
| "title": "A preliminary study of disputation behavior in online debating forum", |
| "authors": [ |
| { |
| "first": "Zhongyu", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Yandi", |
| "middle": [], |
| "last": "Xia", |
| "suffix": "" |
| }, |
| { |
| "first": "Chen", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zachary", |
| "middle": [], |
| "last": "Stallbohm", |
| "suffix": "" |
| }, |
| { |
| "first": "Yi", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Jin", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the Third Workshop on Argument Mining (ArgMining2016)", |
| "volume": "", |
| "issue": "", |
| "pages": "166--171", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhongyu Wei, Yandi Xia, Chen Li, Yang Liu, Zachary Stallbohm, Yi Li, and Yang Jin. 2016b. A prelimi- nary study of disputation behavior in online debating forum. In Proceedings of the Third Workshop on Ar- gument Mining (ArgMining2016), pages 166-171.", |
| "links": null |
| }, |
| "BIBREF48": { |
| "ref_id": "b48", |
| "title": "Bayesian prediction of tissue-regulated splicing using RNA sequence and cellular context", |
| "authors": [ |
| { |
| "first": "Hui", |
| "middle": [ |
| "Yuan" |
| ], |
| "last": "Xiong", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoseph", |
| "middle": [], |
| "last": "Barash", |
| "suffix": "" |
| }, |
| { |
| "first": "Brendan", |
| "middle": [ |
| "J" |
| ], |
| "last": "Frey", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Bioinformatics", |
| "volume": "27", |
| "issue": "18", |
| "pages": "2554--2562", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hui Yuan Xiong, Yoseph Barash, and Brendan J Frey. 2011. Bayesian prediction of tissue-regulated splicing using RNA sequence and cellular context. Bioinfor- matics, 27(18):2554-2562.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "Example argument pair from an online debate.", |
| "num": null, |
| "uris": null, |
| "type_str": "figure" |
| }, |
| "FIGREF2": { |
| "text": "(d) no cycle + 9 undecided prefs.", |
| "num": null, |
| "uris": null, |
| "type_str": "figure" |
| }, |
| "FIGREF3": { |
| "text": "Argument preference graphs for each scenario. Arrows point to the preferred argument.", |
| "num": null, |
| "uris": null, |
| "type_str": "figure" |
| }, |
| "FIGREF4": { |
| "text": "Mean scores over 25 repeats. Bars for GPPL show standard deviation of convincingness function posterior.", |
| "num": null, |
| "uris": null, |
| "type_str": "figure" |
| }, |
| "FIGREF6": { |
| "text": "Mean GPPL (top row) and SVM (bottom row) predictions over 25 repeats. Probability that the argument on the horizontal axis beats the argument on the vertical axis.", |
| "num": null, |
| "uris": null, |
| "type_str": "figure" |
| }, |
| "FIGREF7": { |
| "text": "Runtimes for training+prediction on UKPConvArgStrict with different subsamples of data. Means over 32 runs. Note logarithmic x-axis for (b) and (c).", |
| "num": null, |
| "uris": null, |
| "type_str": "figure" |
| }, |
| "FIGREF8": { |
| "text": "Effect of varying M on accuracy and runtime (training+prediction) of GPPL for UKPConvArgStrict. Means over 32 runs.", |
| "num": null, |
| "uris": null, |
| "type_str": "figure" |
| }, |
| "FIGREF9": { |
| "text": "Active learning simulation showing mean accuracy of preference pair classifications over 32 runs.", |
| "num": null, |
| "uris": null, |
| "type_str": "figure" |
| }, |
| "FIGREF11": { |
| "text": "Histograms of mean normalized length-scales on folds where MLII improved performance > 3%.", |
| "num": null, |
| "uris": null, |
| "type_str": "figure" |
| }, |
| "TABREF1": { |
| "text": "Summary of datasets, showing the different steps used to produce each Internet argument dataset.", |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "content": "<table/>" |
| }, |
| "TABREF4": { |
| "text": "Performance comparison on UKPConvA-rgCrowdSample using ling+GloVe features.", |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "content": "<table/>" |
| }, |
| "TABREF6": { |
| "text": "Normalized length-scales for linguistic features learned using MLII. Shows mean values over folds with > 3% improvement. Includes all values < 0.99, except for POS n-grams (only smallest 5 of 18 shown).", |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "content": "<table/>" |
| } |
| } |
| } |
| } |