| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T12:35:18.196660Z" |
| }, |
| "title": "What Makes a Scientific Paper be Accepted for Publication?", |
| "authors": [ |
| { |
| "first": "Panagiotis", |
| "middle": [], |
| "last": "Fytas", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Imperial College London", |
| "location": { |
| "country": "UK" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Georgios", |
| "middle": [], |
| "last": "Rizos", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Imperial College London", |
| "location": { |
| "country": "UK" |
| } |
| }, |
| "email": "georgios.rizos12@imperial.ac.uk" |
| }, |
| { |
| "first": "Lucia", |
| "middle": [], |
| "last": "Specia", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Imperial College London", |
| "location": { |
| "country": "UK" |
| } |
| }, |
| "email": "l.specia@imperial.ac.uk" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Despite peer-reviewing being an essential component of academia since the 1600s, it has repeatedly received criticisms for lack of transparency and consistency. We posit that recent work in machine learning and explainable AI provide tools that enable insights into the decisions from a given peer-review process. We start by simulating the peer-review process using an ML classifier and extracting global explanations in the form of linguistic features that affect the acceptance of a scientific paper for publication on an open peerreview dataset. Second, since such global explanations do not justify causal interpretations, we propose a methodology for detecting confounding effects in natural language and generating explanations, disentangled from textual confounders, in the form of lexicons. Our proposed linguistic explanation methodology indicates the following on a case dataset of ICLR submissions: a) the organising committee follows, for the most part, the recommendations of reviewers, and b) the paper's main characteristics that led to reviewers recommending acceptance for publication are originality, clarity and substance.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Despite peer-reviewing being an essential component of academia since the 1600s, it has repeatedly received criticisms for lack of transparency and consistency. We posit that recent work in machine learning and explainable AI provide tools that enable insights into the decisions from a given peer-review process. We start by simulating the peer-review process using an ML classifier and extracting global explanations in the form of linguistic features that affect the acceptance of a scientific paper for publication on an open peerreview dataset. Second, since such global explanations do not justify causal interpretations, we propose a methodology for detecting confounding effects in natural language and generating explanations, disentangled from textual confounders, in the form of lexicons. Our proposed linguistic explanation methodology indicates the following on a case dataset of ICLR submissions: a) the organising committee follows, for the most part, the recommendations of reviewers, and b) the paper's main characteristics that led to reviewers recommending acceptance for publication are originality, clarity and substance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The peer review process has been instrumental in academia for determining which papers meet the quality standards for publication in scientific journals and conferences. However, it has received several criticisms, including inconsistencies among review texts, review scores, and the final acceptance decision (Kravitz et al., 2010) , arbitrariness between different reviewer groups (Langford and Guzdial, 2015) as well as reviewer bias in \"singleblind\" peer reviews (Tomkins et al., 2017) .", |
| "cite_spans": [ |
| { |
| "start": 310, |
| "end": 332, |
| "text": "(Kravitz et al., 2010)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 383, |
| "end": 411, |
| "text": "(Langford and Guzdial, 2015)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 467, |
| "end": 489, |
| "text": "(Tomkins et al., 2017)", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Explainable AI (XAI) techniques have been shown to provide global understanding of the data behind a model. 1 Assume we build a classifier that 1 Among the several definitions for explainability and inter-predicts whether a paper is accepted for publication based on a peer review. Interpreting the decision of such a classifier can help comprehend what the reviewers value the most on a research paper. XAI allows us to elicit knowledge from the model about the data (Molnar, 2019) . However, in order to determine what aspects of a paper lead to its acceptance, we need to interpret the peer review classifier explanations causally. Generally, XAI methods do not provide such guarantees since most Machine Learning models simply detect correlations (Molnar, 2019) . Nevertheless, in recent years there has been a trend towards enabling Machine Learning to adjust for causal inference (Sch\u00f6lkopf, 2019) .", |
| "cite_spans": [ |
| { |
| "start": 144, |
| "end": 145, |
| "text": "1", |
| "ref_id": null |
| }, |
| { |
| "start": 468, |
| "end": 482, |
| "text": "(Molnar, 2019)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 751, |
| "end": 765, |
| "text": "(Molnar, 2019)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 886, |
| "end": 903, |
| "text": "(Sch\u00f6lkopf, 2019)", |
| "ref_id": "BIBREF31" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Simply interpreting a non-causal classifier does not suffice when attempting to gain insight into human decision-making in the peer review process. For instance, words such as \"gan\", and \"efficiency\" appear to be important in such a classifier (Section 4). We argue that correlation between such words and paper acceptance is confounded on the subject of the paper: different subjects have different probabilities of acceptance, as well as different degrees to which algorithmic \"efficiency\" is an important factor. Since paper subject ground truth is not necessarily available, we can treat the abstract of a paper as a proxy for its subject, similar to Veitch et al. (2019) .", |
| "cite_spans": [ |
| { |
| "start": 655, |
| "end": 675, |
| "text": "Veitch et al. (2019)", |
| "ref_id": "BIBREF35" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Previous approaches that aim to reconcile NLP with causal inference are limited to either binary treatment variables (Veitch et al., 2019; Roberts et al., 2020; Saha et al., 2019) or nominal confounders (Pryzant et al., 2017 (Pryzant et al., , 2018a . This paper takes a first step towards using learnt natural language embeddings both as the treatment and the confounder. We achieve this by generating deconfounded lexicons (Pryzant et al., 2018a ) through pretability (Lipton, 2016; Rudin, 2018; Murdoch et al., 2019) , we follow the definition of explainability as a plausible justification for the prediction of a model (Rudin, 2018) , and use the terms interpretability and explainability interchangeably.", |
| "cite_spans": [ |
| { |
| "start": 117, |
| "end": 138, |
| "text": "(Veitch et al., 2019;", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 139, |
| "end": 160, |
| "text": "Roberts et al., 2020;", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 161, |
| "end": 179, |
| "text": "Saha et al., 2019)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 203, |
| "end": 224, |
| "text": "(Pryzant et al., 2017", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 225, |
| "end": 249, |
| "text": "(Pryzant et al., , 2018a", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 425, |
| "end": 447, |
| "text": "(Pryzant et al., 2018a", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 470, |
| "end": 484, |
| "text": "(Lipton, 2016;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 485, |
| "end": 497, |
| "text": "Rudin, 2018;", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 498, |
| "end": 519, |
| "text": "Murdoch et al., 2019)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 624, |
| "end": 637, |
| "text": "(Rudin, 2018)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "interpreting causal models that predict the acceptance of a scientific paper, based on their peer reviews and confounded on their abstract. Our main contributions are:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We provide a methodology for developing text classifiers that are able to adjust for confounding effects expressed in natural language. Our evaluation is quantitative, reporting the Informativeness Coefficient measure (Pryzant et al., 2018a,b) , and we also showcase the highest scoring words from the lexicons.", |
| "cite_spans": [ |
| { |
| "start": 220, |
| "end": 245, |
| "text": "(Pryzant et al., 2018a,b)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We extend the classifier of Pryzant et al. (2018b) to use a black-box, instead of an interpretable classifier, which can be explained through model-agnostic tools, such as LIME (Ribeiro et al., 2016 ).", |
| "cite_spans": [ |
| { |
| "start": 30, |
| "end": 52, |
| "text": "Pryzant et al. (2018b)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 179, |
| "end": 200, |
| "text": "(Ribeiro et al., 2016", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We utilise our methodology to extract insights about the peer-review process of ICLR 2017, given that certain assumptions hold. Those insights validate our perceptions of how the peer-review process works, indicating that the method provides meaningful reasoning.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We develop novel models for the task of peer review classification, which achieve a 15.79% absolute increase in accuracy over previous state-of-the-art (Ghosal et al., 2019) .", |
| "cite_spans": [ |
| { |
| "start": 154, |
| "end": 175, |
| "text": "(Ghosal et al., 2019)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In experiments with a dataset from ICLR 2017, we found that the organising committee mostly followed the recommendations of the reviewers, and that the reviewers focused on aspects such as originality, clarity, impact, and soundness of a paper, when suggesting whether or not the paper should be accepted for publication.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The paper is organised as follows: Section 2 presents related work regarding computational analyses of the peer-review process and explainability in NLP. Section 3 gives an overview of PeerRead, the peer review dataset we used in this paper. Section 4 presents our initial exploration on explanation of peer-reviewing, while Section 5 describes our methodology to account for causality by generating deconfounded lexicons.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Peer Review Analysis. The PeerRead dataset (Kang et al., 2018) is the only openly available peer review dataset. It contains submission data for arXiv and the NeurIPS, ICLR, ACL, and CoNLL conferences, however, review data for both accepted and rejected submissions exist only for ICLR. The authors devised a series of NLP prediction baselines on the tasks of paper acceptance based on engineered features, as well as reviewer score prediction based on modelling abstract and review texts. Improvements on these baselines have been proposed by using better text representations as well as joint abstract/review modelling for predicting paper acceptance (Ghosal et al., 2019; Wang and Wan, 2018) . Stappen et al. (2020) revisited the latter task on a dataset containing all the Interspeech 2019 conference abstracts and reviews (which unfortunately is not available) by utilising a review text fusion mechanism. The aforementioned studies model correlations between textual indices and the desired targets, without attempts towards explainability or identification of causal relationships. Hua et al. (2019) , utilise argumentation mining on review texts from the ICLR and UAI conferences. Whereas their methodology focuses on proposition type statistics and transitions, it is a distinct approach to understanding peer-reviewing compared to ours, which is based on XAI.", |
| "cite_spans": [ |
| { |
| "start": 43, |
| "end": 62, |
| "text": "(Kang et al., 2018)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 653, |
| "end": 674, |
| "text": "(Ghosal et al., 2019;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 675, |
| "end": 694, |
| "text": "Wang and Wan, 2018)", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 697, |
| "end": 718, |
| "text": "Stappen et al. (2020)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 1089, |
| "end": 1106, |
| "text": "Hua et al. (2019)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Interpretability in NLP. Interpreting aspects of the input as conducive to the model prediction aims to: a) increase model trustworthiness, and b) allow for greater understanding of the data (Molnar, 2019) . One way to do this is via local explanations, which explain the result of a single prediction, such as the view of high, learnt, class-agnostic attention weights (Bahdanau et al., 2015) applied to Recurrent Neural Network (RNN) hidden states, as proxies of word importance. However, the use of attention weights as explanations has been controversial, as Jain and Wallace (2019) have pointed out that for the same prediction, there can be counterfactual attentional explanations. Inversely, Wiegreffe and Pinter (2019) offer the justification that there may indeed exist multiple plausible explanations, out of which the attention mechanism captures one. An additional limitation of the attention mechanism is the inability to provide class-specific insights, due to the use of the softmax activation function. Alternatively, the predictions of blackbox classifiers can be interpreted through modelagnostic frameworks, such as LIME (Ribeiro et al., 2016) and SHAP (Lundberg and Lee, 2017).", |
| "cite_spans": [ |
| { |
| "start": 191, |
| "end": 205, |
| "text": "(Molnar, 2019)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 370, |
| "end": 393, |
| "text": "(Bahdanau et al., 2015)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 1140, |
| "end": 1162, |
| "text": "(Ribeiro et al., 2016)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We are interested in global explanations, which explain either a set of predictions or provide insights about the general behaviour of the model. Current research is limited either to feature-based classifiers (Altmann et al., 2010) , image classifiers (Kim et al., 2017) or combining local explanations to generate global explanations (Lundberg et al., 2019; Ibrahim et al., 2019) .", |
| "cite_spans": [ |
| { |
| "start": 210, |
| "end": 232, |
| "text": "(Altmann et al., 2010)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 253, |
| "end": 271, |
| "text": "(Kim et al., 2017)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 336, |
| "end": 359, |
| "text": "(Lundberg et al., 2019;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 360, |
| "end": 381, |
| "text": "Ibrahim et al., 2019)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Causal Inference in NLP. Let us assume two observed random variables: the treatment T and the outcome Y . Correlation between those two variables does not necessarily entail causation. Instead, this correlation could be the product of a confounding effect: a third random variable C causes both of the observed random variables (Peters, 2017) . A depiction of the confounding effect can be observed in Figure 1 . Recently, researchers have provided frameworks for treating text as a confounder, but their approaches are limited to binary treatment variables (Veitch et al., 2019; Roberts et al., 2020; Saha et al., 2019) . Keith et al. (2020) have compiled a review of research that utilises text for adjusting for confounding. Fong and Grimmer (2016) explore extracting treatments from text corpora and estimating their causal effect on human decisions. However, they require human annotators for the constructions of training and test sets. Pryzant et al. (2017 Pryzant et al. ( , 2018a examine the problem of identifying a deconfounded lexicon (a set of linguistic features such as words or n-grams) from the text, which acts as a treatment variable. A deconfounded lexicon is \"predictive of a target variable\" but \"uncorrelated to a set of confounding variables\" (Pryzant et al., 2018b) . However, their work is limited to using nominal confounders. In this paper, we explore the use of natural language both as the treatment variable and the confounder.", |
| "cite_spans": [ |
| { |
| "start": 328, |
| "end": 342, |
| "text": "(Peters, 2017)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 558, |
| "end": 579, |
| "text": "(Veitch et al., 2019;", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 580, |
| "end": 601, |
| "text": "Roberts et al., 2020;", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 602, |
| "end": 620, |
| "text": "Saha et al., 2019)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 623, |
| "end": 642, |
| "text": "Keith et al. (2020)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 728, |
| "end": 751, |
| "text": "Fong and Grimmer (2016)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 943, |
| "end": 963, |
| "text": "Pryzant et al. (2017", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 964, |
| "end": 988, |
| "text": "Pryzant et al. ( , 2018a", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 1267, |
| "end": 1290, |
| "text": "(Pryzant et al., 2018b)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 402, |
| "end": 410, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The PeerRead dataset (Kang et al., 2018) consists of 14.7k papers and 10.7k peer reviews, including the meta-reviews by the editor. The papers are from different venues, having been collected with different methods, which leads to a non-uniform dataset. For instance, arXiv papers do not con-tain any reviews, and consequently, they are out of scope for our study. Furthermore, the NeurIPS section contains 2k reviews for only accepted papers. Therefore, using them will lead to a significant imbalance in our data. For those reasons, we decided to examine the 1.3k ICLR 2017 reviews of PeerRead. The ICLR 2017 section is divided into training, validation and test sets with an 80%-10%-10% split. In the Appendix, we can observe the proportion of the accepted to rejected paper in the various partitions of ICLR 2017.", |
| "cite_spans": [ |
| { |
| "start": 21, |
| "end": 40, |
| "text": "(Kang et al., 2018)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset", |
| "sec_num": "3" |
| }, |
| { |
| "text": "PeerRead suffers from various data quality issues, which we have resolved. Firstly, we have removed several empty and duplicate reviews. More importantly, for ICLR 2017 the meta-reviews which contain the review and final decision of the conference chair are not marked as meta-reviews (the meta-review boolean field is marked as false) as they should have been (Kang et al., 2018) . Instead, they are all marked as normal reviews with the title \"ICLR Committee Final Decision\". We explicitly treat them as meta-reviews, a differentiation that we believe is crucial in an XAI study like ours. Notably, subsequent studies that utilise Peer-Read, like DeepSentiPeer (Ghosal et al., 2019) , do not mention whether they have addressed this issue. This hinders a direct comparison with results from DeepSentiPeer as, expectedly, according to our experiments, the use of the meta-reviews significantly increases the performance of classifiers.", |
| "cite_spans": [ |
| { |
| "start": 361, |
| "end": 380, |
| "text": "(Kang et al., 2018)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 663, |
| "end": 684, |
| "text": "(Ghosal et al., 2019)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We make an initial exploration into global explanations in the form of lexicons by mapping each word to an aggregate of scores corresponding to local explanations of PeerRead test sample predictions, as suggested both by the creators of SHAP (Lundberg et al., 2019) and by Pryzant et al. (2018b) . We follow a train-test split setup in our modelling experiments, and only extract explanations from the test set, following Molnar et al. (2020) .", |
| "cite_spans": [ |
| { |
| "start": 273, |
| "end": 295, |
| "text": "Pryzant et al. (2018b)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 422, |
| "end": 442, |
| "text": "Molnar et al. (2020)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Exploration with Global XAI", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We experimented with three different classification tasks: a) the Final Decision Peer Review Classifier fuses the multiple peer reviews to predict the final acceptance decision for a paper, b) the Meta-Review Classifier uses the singular meta-review to predict the final acceptance decision for a paper, and c) the Individual Peer Review Classifier predicts the recommendation of the reviewer for a single review, where we consider as accepted the papers with scores above 5, given a range of 1-10.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Exploration with Global XAI", |
| "sec_num": "4" |
| }, |
| { |
| "text": "After preliminary experiments with Transformer Language Models (TLMs), we have opted to use the SciBERT SciVocab uncased model (Beltagy et al., 2019) trained on 1.7M papers from Semantic Scholar that contain a total of 3.17B tokens for text representation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Interpreting Two Classifier Architectures", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We experimented with two different text modelling approaches, each allowing for a different local explanation generation technique. In our first approach, we used the [CLS] token of our SciBERT model for review text representation. We truncate the end of reviews longer than 512 words. The effect of this truncation is not extremely adverse since only 10.5% of the reviews exceed the maximum length. More details about the length of peer reviews are included in the Appendix. We are not fine-tuning our model due to the risk of overfitting on our small dataset of 1.3k reviews. For the Meta Review and the Individual Review Classifiers, an additional feed forward neural layer is required, followed by a sigmoid activation function to predict the decisions. In the case of the Final Decision Peer Review Classifier, multiple reviews exist, leading to an equal number of SciBERT representations. In order to avoid introducing an arbitrary ordering among the reviews, we fused them into a single representation using an attention mechanism, following Stappen et al. (2020) . We used two more attention layers to produce variant fused embeddings, and concatenated them into a single vector of fixed dimensionality in order to simulate multiheaded attention (Vaswani et al., 2017) .", |
| "cite_spans": [ |
| { |
| "start": 167, |
| "end": 172, |
| "text": "[CLS]", |
| "ref_id": null |
| }, |
| { |
| "start": 1049, |
| "end": 1070, |
| "text": "Stappen et al. (2020)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 1254, |
| "end": 1276, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Interpreting Two Classifier Architectures", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "For the second approach, we only explore the Meta-Review and Individual Peer Review Classifiers. We treat each review as a sequence of word representations given by SciBERT, which we further process using an RNN layer with a Gated Recurrent Unit cell (GRU), followed by an attention mechanism, which is used to provide interpretability to our model. The output of the RNN layer is a vector produced through attention pooling. This vector is input to a feed-forward layer to produce the classification decision.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Interpreting Two Classifier Architectures", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "For the SciBERT [CLS] approach, we treat our model as a black-box and use LIME (Ribeiro et al., 2016) to produce explanations for each test set sample. More important words will have larger absolute scores. For the GRU-based approach, the attention mechanism is used to generate local explanations. More important words will have larger weights. Global explanation scores are then computed as the average of either the LIME score or the attention weights. The implementation details of our model are discussed in the Appendix.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Interpreting Two Classifier Architectures", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "The validity of the explanations in drawing conclusions about peer reviewing is tied to the model predictive performance (Molnar et al., 2020) . Table 1 summarises the performance on the PeerRead held-out test set for the task of predicting the final acceptance recommendation. Although not directly comparable, as explained in Section 3, we also report the baseline performances of PeerRead (Kang et al., 2018) and DeepSentiPeer (Ghosal et al., 2019) , as well as that of the Majority Baseline, according to which the prediction is equal to the multiple reviewer recommendation majority vote. In the case of Individual Peer Review classification, this aggregate prediction is the same for all review samples pertaining to the same paper. We achieve superior performance over the baselines, both when using the meta-review and when fusing all the peer reviews for a specific paper. We note that the meta-review based model is the highest performer as the text is expected to correspond very well to the final recommendation. However, even our regular review fusion model outperforms DeepSentiPeer by a 15.79% absolute increase in accuracy. We hypothesise that this happens due to DeepSentiPeer treating each review (even along with the paper) as a different sample; we use fusion. Another reason for this improvement is presumably the use of the SciBERT model which was finetuned on biomedical and computer science papers. Table 2 summarises the results for the Individual Peer Review Classification task. From the above experiments, we see that the SciBERT [CLS] approach is superior to sequential GRU.", |
| "cite_spans": [ |
| { |
| "start": 121, |
| "end": 142, |
| "text": "(Molnar et al., 2020)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 392, |
| "end": 411, |
| "text": "(Kang et al., 2018)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 430, |
| "end": 451, |
| "text": "(Ghosal et al., 2019)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1423, |
| "end": 1430, |
| "text": "Table 2", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "In Table 3 , we present the top 50 important words in the peer reviews for the Individual Peer Review Classification. In the Appendix, we include explanations for the other models. For both meta-reviews and peer reviews, we observe various technical terms (\"lda\", \"gans\", \"variational\", \"generative\", \"adversarial\", \"convex\", etc.). While this indicates a positive correlation between some subjects (such as GANs) and the acceptance to ICLR 2017, it is hard to argue about a causal relationship. For instance, a possible non-causal relationship could be the following confounding effect: top re- searchers perform novel research, such as GANs, and top researchers have a higher probability of having their work published. Furthermore, we observe plenty of terms that seem to directly criticise the quality of the work accomplished in a paper: \"efficiently\", \"confusing\", \"unconvincing\", \"superficial\", \"comprehensive\", \"systematically\", \"carefully\", \"untrue\" and more. Again, it is easy to make the mistake of assuming a causal relationship between the \"efficiency\" of an algorithm, suggested in a paper, and the acceptance for publication. For instance, depending on the subject of a paper, the efficiency may or may not increase the chances of a publication. To elaborate, for a paper about real-time language interpretation, the efficiency of the algorithm may directly influence the acceptance decision. On the contrary, the efficiency of training a GAN model may be inconsequential, as far as the ICLR reviewers are concerned.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 3, |
| "end": 10, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "\"Unjustified Causal Interpretation\" is one of the pitfalls of global explanations (Molnar et al., 2020) . Simply put, global explanations detect correlation and correlation does not entail causation. In order to learn what leads to a scientific paper being accepted for publication, we need causality. For instance, words such as \"efficiency\" and \"novelty\" appear to be important. However, there may not exist a causal relationship between them and the acceptance of a paper. Specifically, more theoretical subjects may demand \"novelty\", and the chances of a theoretical subject being accepted may be greater. Therefore, the correlation of the novelty and the acceptance of a paper may be an artefact of this confounding effect on the subject of the paper.", |
| "cite_spans": [ |
| { |
| "start": 82, |
| "end": 103, |
| "text": "(Molnar et al., 2020)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Producing Causal Explanations", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Adjusting for every possible confounding and mediating effect is not possible for most machine learning models, partially due to lack of ground truth data. Causality in machine learning is limited to taking several strong assumptions and providing a causal interpretation, for as long as those assumptions hold (Molnar et al., 2020) . When text is involved, even more assumptions must be made, due to its high dimensionality (Veitch et al., 2019) . We assume that: a) the subject of a paper is sufficient to identify the causal effect, b) the abstract can act as a proxy to the subject, and c) the embedding method extracts information relevant to the subject. In Section 5.4, we discuss the validity and limitations of those assumptions. Even when such assumptions are violated, our methodology is meaningful since it allows disentangling explanations of text classifiers from the natural language embedding of the text we want to control.", |
| "cite_spans": [ |
| { |
| "start": 311, |
| "end": 332, |
| "text": "(Molnar et al., 2020)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 425, |
| "end": 446, |
| "text": "(Veitch et al., 2019)", |
| "ref_id": "BIBREF35" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Producing Causal Explanations", |
| "sec_num": "5" |
| }, |
| { |
| "text": "In Figure 1 , we depict the confounding effect. In this context, Y is the target variable (e.g. the acceptance of a paper for publication), T is the text (e.g. a peer review), and C are the confounding variables (e.g. the subject of a paper).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 3, |
| "end": 11, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "The task is to extract a deconfounded lexicon L such that L(T ) (the set of words of L that exist in the text T ) is correlated to the target variable Y but not to the confounders C (Pryzant et al., 2018a) . For our purposes, this means that the extracted lexicon can offer explanations about the acceptance of a paper for publication regardless of the subject of a paper. Pryzant et al. (2018a) have introduced the concept of Informativeness Coefficient I(L):", |
| "cite_spans": [ |
| { |
| "start": 182, |
| "end": 205, |
| "text": "(Pryzant et al., 2018a)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 373, |
| "end": 395, |
| "text": "Pryzant et al. (2018a)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "I(L) = E[Var[E[Y |L(T ), C]]|C] where Var[E[Y |L(T ), C]]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "is the amount of information in the target variable Y that can be explained both by L(T ) and C. In order to extract a deconfounded lexicon L, we must maximise the Informativeness Coefficient I(L). The use of I(L) as an evaluation of the quality of causal explanations is motivated by the ability of I(L) to measure the causal effect of the text T on Y , under certain assumptions (Pryzant et al., 2018b) .", |
| "cite_spans": [ |
| { |
| "start": 381, |
| "end": 404, |
| "text": "(Pryzant et al., 2018b)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "To further understand this concept, let us consider a lexicon L of fixed length. L(T ) contains either words descriptive of T \\ C or words descriptive of T \u2229 C. Words related to the confounders (from T \u2229 C), in theory, do not increase the Informativeness Coefficient since the confounder C is treated as a fixed variable that directly affects Y . For instance, the word \"gan\" in a peer review may not be as important when the subject of the paper (the confounder) is already taken into consideration. On the contrary, words like \"enjoyable\", which are potentially unrelated to the subject of the paper, can contain more information. Therefore, the fewer the words in the lexicon that are related to the confounders C, the more deconfounded a lexicon will be, and I(L) will take larger values.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "An ANOVA decomposition is used to write the Informativeness Coefficient of a lexicon I(L) (Pryzant et al., 2018a) :", |
| "cite_spans": [ |
| { |
| "start": 90, |
| "end": 113, |
| "text": "(Pryzant et al., 2018a)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "I(L) = E[(Y \u2212 E[Y |C]) 2 ] \u2212 E[(Y \u2212 E[Y |L(T ), C]) 2 ]", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Background", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "In practice, I(L) can be estimated by fitting a logistic regression on the confounders C to predict the outcome Y and a logistic regression on both C and L(T ) (a Bag-of-Word created from the lexicon and the text T ). Then, for binary classification, the Binary Cross Entropy error is used to measure I(L):", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "I(L) BCE C \u2212 BCE L(T ),C", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Deep Residualisation (DR) (Pryzant et al., 2018a) is a method for extracting a deconfounded lexicon. A visualisation of the DR model can be seen in Figure 2 . The main idea is to utilise a neural model to predict the target variable\u0176 using only nominal confounders C, and an interpretable model that encodes the text T into an embedding vector e. Then, the intermediate prediction\u0176 is concatenated with the embedding vector e and forwarded through a feed-forward network to produce the final prediction\u0176 . In order to train this model, two loss functions are used. Firstly, the errors from the intermediate prediction\u0176 (i.e. BCE C ) are propagated through the Intermediate Prediction Neural Network. Secondly, the errors from the final predic-tion\u0176 (i.e. BCE T,C ) are propagated through the complete neural network (Pryzant et al., 2018a) . (Pryzant et al., 2018a) . The confounders C are used for intermediate predictions Y . The treatment T (i.e. the review of a paper) is encoded to a vector e, which is concatenated with Y to produce the final prediction Y .", |
| "cite_spans": [ |
| { |
| "start": 26, |
| "end": 49, |
| "text": "(Pryzant et al., 2018a)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 816, |
| "end": 839, |
| "text": "(Pryzant et al., 2018a)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 842, |
| "end": 865, |
| "text": "(Pryzant et al., 2018a)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 148, |
| "end": 156, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "The DR architecture and the choice of loss functions are directly influenced by the Informativeness Coefficient. Specifically, Equation 1 provides additional intuition behind the Informativeness Coefficient: I(L) \"measures the ability of a lexicon L(T ) to enhance the predictions of the outcome Y made from the confounders C\" (Pryzant et al., 2018b) . The DR architecture utilises C to produce intermediate Y predictions, while the text T is used to improve upon the intermediate predictions (Pryzant et al., 2018b) . Intuitively, if the encoder learns to focus on aspects of the text, which are unrelated to the confounders but predictive of the outcome, the improvement of the intermediate predictions, and consequently I(L), will be maximised.", |
| "cite_spans": [ |
| { |
| "start": 327, |
| "end": 350, |
| "text": "(Pryzant et al., 2018b)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 493, |
| "end": 516, |
| "text": "(Pryzant et al., 2018b)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Although there may exist multiple confounders to the outcome (e.g., reviewer guidelines, or the paper content itself), we focus specifically on the subject of the paper as a confounder. Such annotation, however, is not part of the available data. Similar to the study performed in Veitch et al. (2019) , we assume that the paper abstract is predictive of its subjects, and opt to utilise it for adjusting for confounding.", |
| "cite_spans": [ |
| { |
| "start": 281, |
| "end": 301, |
| "text": "Veitch et al. (2019)", |
| "ref_id": "BIBREF35" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Submission Subject as a Confounder", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "A limitation of the work of (Pryzant et al., 2018b ) is that they only consider nominal confounders, which they forward through a Multi-layer Perceptron (MLP). We have extended the DR algorithm to extract deconfounded explanations from the reviews while using learnt natural language embeddings as the confounder. Specifically, we are forwarding our confounder (the abstract text) through a SciBERT Language Model to produce an embedding and then, forward that embedding through an MLP, with a sigmoid activation function at the end, to predict the intermediate classification outcom\u00ea Y . For the task of meta-review classification,\u0176 aims to predict the final acceptance outcome, and for the task of individual peer review classification,\u0176 aims to predict the recommendation of the reviewer. Similarly with Veitch et al. 2019, we assume that the embedding method is able to extract information relevant to the subject of the paper.", |
| "cite_spans": [ |
| { |
| "start": 28, |
| "end": 50, |
| "text": "(Pryzant et al., 2018b", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Submission Subject as a Confounder", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Firstly, we have extended the DR+ATTN and DR+BoW models (Pryzant et al., 2018b) to allow for treating of natural language as the confounder, using the aforementioned methodology. The DR+ATTN model utilises an RNN-GRU layer with an attention mechanism to encode the text into the vector e. Therefore, interpretability is achieved through the attention mechanism. The DR+BoW model, forwards a Bag-of-Words (BoW) representation of the text through a single linear neural layer to produce an one-dimensional vector e. Each feature of our model is an n-gram that may appear on the text of the review. Therefore, we can globally interpret our model by using the weight of an ngram in the single linear layer as the importance of that word (Pryzant et al., 2018b) .", |
| "cite_spans": [ |
| { |
| "start": 56, |
| "end": 79, |
| "text": "(Pryzant et al., 2018b)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 733, |
| "end": 756, |
| "text": "(Pryzant et al., 2018b)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Submission Subject as a Confounder", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Secondly, instead of an interpretable model, we utilise a black-box that encodes the text into the vector e and then, use LIME to explain the predictions of our classifier. Therefore, we have developed the DR+LIME variant which can be used to extract a deconfounded lexicon from BERT-based models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Submission Subject as a Confounder", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "In Table 4 , we present metrics for benchmarking the performance of our various lexicons. In order to add more clarity, apart from the Informativeness Coefficient I(L), we include the performance (F1-score) of the different logistic regression models. Furthermore, we report the Informativeness Coefficient of the lexicons generated by the Logis-tic Regression (LR) and the Logistic Regression with Confound features (LRC) (McNamee, 2005) baselines.", |
| "cite_spans": [ |
| { |
| "start": 423, |
| "end": 438, |
| "text": "(McNamee, 2005)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 3, |
| "end": 10, |
| "text": "Table 4", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Quantitative Evaluation", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "The Informativeness Coefficient is useful for measuring the degree to which the lexicons are deconfounded. However, the metric by itself is useful for comparing lexicons of fixed size and for a specific task. Therefore, comparing the I(L) of the meta-review with the one from peer reviews is an uneven comparison. The reason for this is that, as we have observed, classifying meta-reviews is a much simpler problem. This leads to lexicons that perform much better and therefore, having higher I(L) values even when words related to the confounders persist in the lexicon.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Quantitative Evaluation", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "An important observation is that the deconfounded lexicons are less predictive of the final outcome compared to the non-causal lexicons, when not combined with the Confounder. For instance, in the case of Peer-Review GRU model, the F1-Score of L(T ) drops from 60% to 59% with Deep Residualisation. On the contrary, when the deconfounded lexicons are coupled with the confounders, the logistic regression performs better compared to the non-causal lexicon. For the Peer-Review GRU model, the F1-Score of L(T ), C increases from 60% to 64% with Deep Residualisation. The intuition behind this is that the initial lexicon had some words related to the confounders that were important for the final prediction. The DR method does not attend to those words because they are related to the confounders. Instead, they are replaced with words, which may be less predictive of the final outcome, yet uncorrelated to the confounders.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Quantitative Evaluation", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Ultimately, DR+LIME and DR+BoW are less successful than DR+ATTN in generating a deconfounded lexicon for the peer reviews since the lexicon generated from DR+ATTN achieves a superior Informativeness Coefficient. Still, our DR+LIME technique manages to produce a more informative lexicon than the DR+BoW model and the LRC baseline.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Quantitative Evaluation", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Causal inference is limited to making several assumptions and providing causal guarantees for as long as those assumptions hold (Molnar et al., 2020; Pryzant et al., 2018a; Veitch et al., 2019) . We assume that:", |
| "cite_spans": [ |
| { |
| "start": 128, |
| "end": 149, |
| "text": "(Molnar et al., 2020;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 150, |
| "end": 172, |
| "text": "Pryzant et al., 2018a;", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 173, |
| "end": 193, |
| "text": "Veitch et al., 2019)", |
| "ref_id": "BIBREF35" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion on our Assumptions", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "1. The subject of a paper is sufficient to identify the causal effect, i.e., there is no unobserved attempt, comprehensive, interfere, variational, entirely, systematically, perfect, sure, lda, formalize, tolerant, valid, belief, gan, imposible, adagrad, semantically, aware, clear, carefully, j., and/or, arguably, fully, offer, vs., confident, message, object, receive, probably, multimodality, strictly, directly, twitter, join, e.g., observe, complete, explain, novelty, dualnets, convince, handle, nintendo, theorem, satisfy, ready, demand identical, sure, premise, carefully, heavy, specify, tease, interfere, necessarily, adagrad, attempt, novelty, fully, repeat, systematically, not, gray, theoretically, anymore, role, belief, recommendation, almost, consistently, primary, subtraction, good, satisfactory, background, write, compute, easy, teach, enough, convince, except, ready, explain, usage, sell, unfortunately, arguably, yet, especially, elegantly, sufficiently, handle, cdl, setup, reception interspeech, zhang, prons, dismissed, third, p6, confusing, resulting, geometry, unconvincing, honestly, not, community, cannot, superficial, readership, big, suggestion, dialogue, revisit, bag, analysed, icml, taken, typo, submitted, energy, nice, spirit, competitive, per, highlighted, multimodality, far, 04562, lack, preprint, dcgan, conduct, word2vec, wen, gan, rejected, start, towards, multiplication, generalisation, auxiliary, parametric, enjoyed interspeech, dismissed, prons, geometry, p6, submitted, unfair, unconvincing, readership, honestly, not, spirit, confusing, analysed, bag, unable, cannot, rejected, insightful, multiplication, highlighted, enjoyed, misleading, disagree, dialogue,mu_, wen, lack, nice, multimodality, welcome, conduct, recommender, encourage, dualnets, thanks, cdl, preprint, 04562, enjoy, revisit, community, appreciate, principled, medical, coding, drnn, accompanying, factorization, jmlr Table 3 : Top 50 salient words for Individual Peer 
Review Classification. We present class-agnostic explanations both for some of the non-causal models (Section 4) and for some DR models (Section 5).", |
| "cite_spans": [ |
| { |
| "start": 100, |
| "end": 1937, |
| "text": "attempt, comprehensive, interfere, variational, entirely, systematically, perfect, sure, lda, formalize, tolerant, valid, belief, gan, imposible, adagrad, semantically, aware, clear, carefully, j., and/or, arguably, fully, offer, vs., confident, message, object, receive, probably, multimodality, strictly, directly, twitter, join, e.g., observe, complete, explain, novelty, dualnets, convince, handle, nintendo, theorem, satisfy, ready, demand identical, sure, premise, carefully, heavy, specify, tease, interfere, necessarily, adagrad, attempt, novelty, fully, repeat, systematically, not, gray, theoretically, anymore, role, belief, recommendation, almost, consistently, primary, subtraction, good, satisfactory, background, write, compute, easy, teach, enough, convince, except, ready, explain, usage, sell, unfortunately, arguably, yet, especially, elegantly, sufficiently, handle, cdl, setup, reception interspeech, zhang, prons, dismissed, third, p6, confusing, resulting, geometry, unconvincing, honestly, not, community, cannot, superficial, readership, big, suggestion, dialogue, revisit, bag, analysed, icml, taken, typo, submitted, energy, nice, spirit, competitive, per, highlighted, multimodality, far, 04562, lack, preprint, dcgan, conduct, word2vec, wen, gan, rejected, start, towards, multiplication, generalisation, auxiliary, parametric, enjoyed interspeech, dismissed, prons, geometry, p6, submitted, unfair, unconvincing, readership, honestly, not, spirit, confusing, analysed, bag, unable, cannot, rejected, insightful, multiplication, highlighted, enjoyed, misleading, disagree, dialogue,mu_, wen, lack, nice, multimodality, welcome, conduct, recommender, encourage, dualnets, thanks, cdl, preprint, 04562, enjoy, revisit, community, appreciate, principled, medical, coding, drnn, accompanying, factorization, jmlr", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1938, |
| "end": 1945, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Discussion on our Assumptions", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "GRU (Non-Causal) GRU (DR+ATTN) SciBERT (Non-Causal) SciBERT(DR+LIME) not,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion on our Assumptions", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "Model", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Type", |
| "sec_num": null |
| }, |
| { |
| "text": "I(L)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Type", |
| "sec_num": null |
| }, |
| { |
| "text": "Logistic Regression Macro F1-Score Only Text", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Type", |
| "sec_num": null |
| }, |
| { |
| "text": "Only Conf. Text and Conf. confounding. This is a standard causality assumption, albeit a strong one (Veitch et al., 2019) . This assumption essentially means that there should not be other external confounders. The first way to attack this assumption is to argue that a reviewer could have biases that affect their decisions. Those biases can act as external confounders. For instance, a famous author may receive preferential treatment from some reviewers. Indeed, in \"single-blind\" peer reviews, as was the case for ICLR 2017, it has been observed that reviewers tend to accept more papers from top universities and companies (Tomkins et al., 2017 ), compared to \"double-blind\" peer reviews, where the author is anonymous during the review process. However, it is practically impossible to adjust for every potential source of reviewer bias. Another way to attack this assumption is by arguing that the writing quality, for instance, could be another confounder.", |
| "cite_spans": [ |
| { |
| "start": 100, |
| "end": 121, |
| "text": "(Veitch et al., 2019)", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 628, |
| "end": 649, |
| "text": "(Tomkins et al., 2017", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Type", |
| "sec_num": null |
| }, |
| { |
| "text": "L(T ) C L(T ), C", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Type", |
| "sec_num": null |
| }, |
| { |
| "text": "Nevertheless, when a paper lacks clarity, we can assume that the reviewer will point out the poor writing quality in the review. In that case, the review text accounts for the writing quality and therefore, it is not an external confounder.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Type", |
| "sec_num": null |
| }, |
| { |
| "text": "2. The abstract of the paper can act as a proxy to its subject. Indeed, an abstract should reflect the main topic of the paper. In that setting, the abstract can be thought of as a \"noisy realisation\" of the subject (Veitch et al., 2019) .", |
| "cite_spans": [ |
| { |
| "start": 216, |
| "end": 237, |
| "text": "(Veitch et al., 2019)", |
| "ref_id": "BIBREF35" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Type", |
| "sec_num": null |
| }, |
| { |
| "text": "3. The embedding method is able to extract information relevant to the subject of the paper. The use of state-of-the-art language models justifies this assumption (Veitch et al., 2019) . In this case, we are using SciBERT embeddings, which achieve state-of-the-art performance on scientific NLP tasks (Beltagy et al., 2019) . We could further bolster this assumption by fine-tuning our SciBERT model. However, this would require a larger dataset of peer reviews to avoid overfitting.", |
| "cite_spans": [ |
| { |
| "start": 163, |
| "end": 184, |
| "text": "(Veitch et al., 2019)", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 301, |
| "end": 323, |
| "text": "(Beltagy et al., 2019)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Type", |
| "sec_num": null |
| }, |
| { |
| "text": "4. The outcome Y (t) is generated from the following data generated process: Y (t) = f c (t) + , where t is the text and c the fixed observed confounders. This is a standard assumption in observational studies, and along with the assumption that there is no unobserved confounding, they guarantee that the Informativeness Coefficient measures the causal effect of the full text T on the outcome Y (Pryzant et al., 2018b) .", |
| "cite_spans": [ |
| { |
| "start": 397, |
| "end": 420, |
| "text": "(Pryzant et al., 2018b)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Type", |
| "sec_num": null |
| }, |
| { |
| "text": "As long as our assumptions hold, we can causally interpret the explanations of our models. Since we value the degree with which DR has managed to deconfound our lexicon, the explanations for the peer reviews should be extracted from DR+ATTN, which achieves the greater I(L) values.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Lexicons Inspection", |
| "sec_num": "5.5" |
| }, |
| { |
| "text": "In Table 3 , we can observe a comparison of the top 50 salient words between the causal and noncausal models for the peer reviews. In the first place, we can observe that in the top 50 explanations of DR+ATTN, technical words, like \"generative\", \"variational\" and \"lda\" disappear in the explanations of the causal model. This occurrence fits our expectations: the technical words are generally related to the confounder (the subject of a paper) and should not appear in the explanation. As far as the BERT-based causal model is concerned, its lexicon still has plenty of technical terms. This is because the DR+LIME method is less successful in removing the confounding effect.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 3, |
| "end": 10, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Lexicons Inspection", |
| "sec_num": "5.5" |
| }, |
| { |
| "text": "For the meta-reviews, we notice the prevalence of words like \"enjoyed\", \"accepted\", \"mixed\", \"positively\" and \"recommend\". Those words are, for the most part, related with to the opinions of the peer reviewers (e.g. \"The reviewers all enjoyed reading this paper\"). Furthermore, a logistic regressor fitted on the average recommendation score of the reviewers is able to predict the final outcome of the submission with an accuracy of 95%. All this evidence suggests that the ICLR 2017 committee chair for the vast majority of papers followed the recommendation of the reviewers.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Lexicons Inspection", |
| "sec_num": "5.5" |
| }, |
| { |
| "text": "In the case of peer reviews, highly ranked in our explanations, we can observe the word \"novelty\". We can deduce that the Originality of a paper is indeed essential for ICLR 2017. In the top 50 words we observe words such as \"explain\", \"carefully\", \"systematically\", \"consistently\", \"convince\" and \"sufficiently\". Those words indicate the importance of the Soundness of the scientific approach and how convincingly the claims are presented. Moreover, in the top 100 words, there words that reflect the Clarity and the quality of writing: \"elegantly\", \"polish\", \"readable\", \"clear\". We can, also, detect words like \"comprehensive\" and \"extensive\", which reflect the Substance (and completeness) of a paper. Lastly, various words exist that potentially reflect other aspects of a paper, such as Meaningful Comparison (\"compare\"), Impact (\"contribute\") and Appropriateness (\"appropriate\").", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Lexicons Inspection", |
| "sec_num": "5.5" |
| }, |
| { |
| "text": "Therefore, if our assumptions hold, we can claim that certain characteristics of a paper, such as Originality, Soundness and Clarity, lead the reviewers to recommend that a scientific paper is accepted for publication.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Lexicons Inspection", |
| "sec_num": "5.5" |
| }, |
| { |
| "text": "With peer-reviewing as a use case, we have proposed a framework for interpreting the decisions of text classifiers causally, by using natural language to adjust for confounding effects. Our methodology succeeds in extracting deconfounded lexicons that exclude terms related to the paper subject (i.e., the confounders), a task where established, noncausal global explanation approaches fail. Prior work was limited to either nominal confounders (Pryzant et al., 2017 (Pryzant et al., , 2018a or binary treatment variables (Veitch et al., 2019) . Furthermore, we have extended the Deep Residualisation (Pryzant et al., 2018a) to allow for black-box models.", |
| "cite_spans": [ |
| { |
| "start": 445, |
| "end": 466, |
| "text": "(Pryzant et al., 2017", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 467, |
| "end": 491, |
| "text": "(Pryzant et al., , 2018a", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 522, |
| "end": 543, |
| "text": "(Veitch et al., 2019)", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 601, |
| "end": 624, |
| "text": "(Pryzant et al., 2018a)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "6" |
| }, |
| { |
| "text": "As is standard in causal inference (Molnar et al., 2020) , the causal guarantees of the deconfounded lexicon we generate is limited by certain assumptions. Future work could explore strengthening our assumptions through fine-tuning of the Transformer Language Model representation of the confounder text. Furthermore, fine-tuning the Encoder of DR+LIME may lead to producing more informative lexicons. To avoid overfitting in this finetuning process, we intend to apply our methodology on larger datasets that the one used in this paper for peer reviews. ", |
| "cite_spans": [ |
| { |
| "start": 35, |
| "end": 56, |
| "text": "(Molnar et al., 2020)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Dataset statistics are summarised in Table 5 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 37, |
| "end": 44, |
| "text": "Table 5", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "A Dataset statistics", |
| "sec_num": null |
| }, |
| { |
| "text": "In the section, we provide implementation details such as the number of neurons in our networks and the training hyperparameters. All hidden layers of our neural networks have a ReLU activation function, and the output layers have a Sigmoid activation function. Let us first focus on the non-causal SciBERT [CLS] approach. For the Final Decision Peer Review Classifier, we are using two attention layers and therefore the concatenated fused embeddings 768 * 2 = 1536 dimensions. The first layer of the MLP has 1536 input neurons and 128 output neurons. The second layer has 128 input neurons and 64 output neurons. The third (and last) layer has 64 input neurons and 1 output neuron. The probability of the Dropout for each layer is 20%. The Individual Decision Peer Review Classifier and the Meta-Review Classifier have essentially the same MLP as the Final Decision Peer Review Classifier but with 768 input neurons. For all SciBERT [CLS] classifiers, we are using Binary Cross Entropy as the loss function and an Adam optimiser (Kingma and Ba, 2014) with a learning rate of 1 \u00d7 10 \u22124 . However, we are training our classifiers for a different amount of epochs and with different batch sizes. For the Final Decision Peer Review Classifier, we are training for 500 epochs with a batch size of 100. The Meta-Review Classifier is trained for the 100 epochs, with a batch size of 100. Finally, the Individual Peer Review Classifier has 220 training epochs and a batch size of 300 samples.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B Implementation Details", |
| "sec_num": null |
| }, |
| { |
| "text": "Focusing on the parameters of the non-causal GRU classifiers, the GRU cell has 768 input neurons (the size of the BERT embedding) and 30 output neurons. The first layer of the MLP has 30 input neurons and 16 output neurons. The last layer has 16 input neurons and 1 output neuron. The probability of the Dropout for each layer is 20%. Similarly to before, we are using a Binary Cross Entropy loss function with an Adam optimiser. The Individual Peer Review classifier has a learning rate of 5 \u00d7 10 \u22124 and is trained for 80 training epochs with a batch size of 500 samples. The Meta-Review classifier has a learning rate of 8 \u00d7 10 \u22125 , is trained for 70 epochs and has a batch size of 30 samples.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B Implementation Details", |
| "sec_num": null |
| }, |
| { |
| "text": "Focusing on the causal DR models, all models have the same architecture for the MLP that produces the intermediate predictions \u0176. The first layer has 768 input neurons, 10 output neurons. The second layer has 10 input neurons, 5 output neurons. The last layer has 5 input neurons, 1 output neuron.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B Implementation Details", |
| "sec_num": null |
| }, |
| { |
| "text": "In the case of the DR+LIME causal model, the MLP that produces the embedding e has 2 layers. The first one has 768 input neurons and 300 output neurons. The second layer has 300 input neurons and 100 output neurons. The embedding e is constructed as a 100 dimensional vector. The MLP that predicts the final acceptance decision has 3 layers. The first layer has 101 input neurons, 64 output neurons. The second one has 64 input neurons and 16 output neurons. The last layer has 16 input neurons and 1 output neuron. An Adam optimiser is used, with a learning rate of 1 \u00d7 10 \u22124 . The model is trained for 125 epochs, with a batch size of 300.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B Implementation Details", |
| "sec_num": null |
| }, |
| { |
| "text": "As far as the DR+LIME models are concerned, in the case of Individual Peer Review Classification, the embedding e is a 30 dimensional vector. The MLP that predicts the final acceptance decision has 2 layers. The first layer has 31 input neurons, 16 output neurons. The last layer has 16 input neurons and 1 output neuron. An Adam optimiser is used, with a learning rate of 5 \u00d7 10 \u22124 . The model is trained for 70 epochs, with a batch size of 250. In the case of meta-review classification, the embedding e is a 64 dimensional vector. Similarly to before, the MLP that predicts the final acceptance decision has 2 layers. The first layer has 65 input neurons, 32 output neurons. The last layer has 12 input neurons and 1 output neuron. Lastly, an Adam optimiser is used, with a learning rate of 1 \u00d7 10 \u22124 . The model is trained for 80 epochs, with a batch size of 30.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B Implementation Details", |
| "sec_num": null |
| }, |
| { |
| "text": "For the DR+BoW model, the MLP that predicts the final acceptance decision has a single layer with 2 input neurons and 1 output neuron. An Adam optimiser is used, with a learning rate of 8 \u00d7 10 \u22124 . The model is trained for 200 epochs, with a batch size of 500.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B Implementation Details", |
| "sec_num": null |
| }, |
| { |
| "text": "For the SciBERT models (Beltagy et al., 2019) , we perform standard BERT preprocessing, which includes tokenisation and lowercasing. Additionally, we truncate the end of the reviews that exceed 512 input tokens, which is the maximum supported by BERT (Devlin et al., 2018) . The mean length of an ICLR 2017 peer review is 346 input tokens, with a standard deviation of 213 (Kang et al., 2018) . For meta-reviews, the mean length drops to 93 and the standard deviation to 77.", |
| "cite_spans": [ |
| { |
| "start": 23, |
| "end": 45, |
| "text": "(Beltagy et al., 2019)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 251, |
| "end": 272, |
| "text": "(Devlin et al., 2018)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 373, |
| "end": 392, |
| "text": "(Kang et al., 2018)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B.1.1 SciBERT Model Preprocessing", |
| "sec_num": null |
| }, |
| { |
| "text": "Truncating reviews will affect the performance of our models. However, that effect is limited to an extent. Namely, only 10.5% of the peer reviews have a length of more than 512 tokens. From that 10.5%, around half of those reviews have less than 15% of their tokens truncated. Therefore, for those reviews, the effect of the truncation is not extremely adverse. Lastly, 0.5% of all peer reviews have more than half of their tokens truncated which could have a substantial impact on the classification of those few reviews. A histogram with the lengths of peer reviews can be seen in Figure 3 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 584, |
| "end": 592, |
| "text": "Figure 3", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "B.1.1 SciBERT Model Preprocessing", |
| "sec_num": null |
| }, |
| { |
| "text": "For meta-reviews, the effect of truncation is negligible since only 0.05% have more than 512 input tokens. The histogram with the lengths of metareviews can be seen in Figure 4 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 168, |
| "end": 176, |
| "text": "Figure 4", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "B.1.1 SciBERT Model Preprocessing", |
| "sec_num": null |
| }, |
| { |
| "text": "For GRU models, apart from tokenisation, we perform punctuation removal, lemmatisation and lowercasing.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B.1.2 GRU Model Preprocessing", |
| "sec_num": null |
| }, |
| { |
| "text": "A summary of the confusion matrices can be seen in Figure 6 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 51, |
| "end": 59, |
| "text": "Figure 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "C Confusion Matrix", |
| "sec_num": null |
| }, |
| { |
| "text": "In this Appendix, we present tables with the explanations we generate with our various models. In Table 7 , we provide the top 50 important words for every non-causal model. In Table 8 , we present the top 50 non-causal and causal words from metareviews. Table 9 contains the top 50 words for causal and non-causal Individual Peer Review Classifiers. Lastly, Table 10 provides the top salient n-grams for causal and non-causal Bag-of-Words models. Table 9 : Top 50 salient words for the task of Individual Peer Classification. In order to make comparisons, we present both causal and non-causal models.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 98, |
| "end": 105, |
| "text": "Table 7", |
| "ref_id": "TABREF8" |
| }, |
| { |
| "start": 177, |
| "end": 184, |
| "text": "Table 8", |
| "ref_id": "TABREF9" |
| }, |
| { |
| "start": 255, |
| "end": 262, |
| "text": "Table 9", |
| "ref_id": null |
| }, |
| { |
| "start": 359, |
| "end": 367, |
| "text": "Table 10", |
| "ref_id": null |
| }, |
| { |
| "start": 448, |
| "end": 455, |
| "text": "Table 9", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "D Explanations", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "BoW (Non-Causal) BoW ( ", |
| "cite_spans": [ |
| { |
| "start": 4, |
| "end": 16, |
| "text": "(Non-Causal)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "annex", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Permutation importance: a corrected feature importance measure", |
| "authors": [ |
| { |
| "first": "Andr\u00e9", |
| "middle": [], |
| "last": "Altmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Laura", |
| "middle": [], |
| "last": "Tolo\u015fi", |
| "suffix": "" |
| }, |
| { |
| "first": "Oliver", |
| "middle": [], |
| "last": "Sander", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Lengauer", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Bioinformatics", |
| "volume": "26", |
| "issue": "10", |
| "pages": "1340--1347", |
| "other_ids": { |
| "DOI": [ |
| "10.1093/bioinformatics/btq134" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andr\u00e9 Altmann, Laura Tolo\u015fi, Oliver Sander, and Thomas Lengauer. 2010. Permutation importance: a corrected feature importance measure. Bioinfor- matics, 26(10):1340-1347.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Construction of the literature graph in semantic scholar", |
| "authors": [ |
| { |
| "first": "Waleed", |
| "middle": [], |
| "last": "Ammar", |
| "suffix": "" |
| }, |
| { |
| "first": "Dirk", |
| "middle": [], |
| "last": "Groeneveld", |
| "suffix": "" |
| }, |
| { |
| "first": "Chandra", |
| "middle": [], |
| "last": "Bhagavatula", |
| "suffix": "" |
| }, |
| { |
| "first": "Iz", |
| "middle": [], |
| "last": "Beltagy", |
| "suffix": "" |
| }, |
| { |
| "first": "Miles", |
| "middle": [], |
| "last": "Crawford", |
| "suffix": "" |
| }, |
| { |
| "first": "Doug", |
| "middle": [], |
| "last": "Downey", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Dunkelberger", |
| "suffix": "" |
| }, |
| { |
| "first": "Ahmed", |
| "middle": [], |
| "last": "Elgohary", |
| "suffix": "" |
| }, |
| { |
| "first": "Sergey", |
| "middle": [], |
| "last": "Feldman", |
| "suffix": "" |
| }, |
| { |
| "first": "Vu", |
| "middle": [], |
| "last": "Ha", |
| "suffix": "" |
| }, |
| { |
| "first": "Rodney", |
| "middle": [], |
| "last": "Kinney", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Kohlmeier", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyle", |
| "middle": [], |
| "last": "Lo", |
| "suffix": "" |
| }, |
| { |
| "first": "Tyler", |
| "middle": [], |
| "last": "Murray", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Hsu-Han", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [ |
| "E" |
| ], |
| "last": "Ooi", |
| "suffix": "" |
| }, |
| { |
| "first": "Joanna", |
| "middle": [], |
| "last": "Peters", |
| "suffix": "" |
| }, |
| { |
| "first": "Sam", |
| "middle": [], |
| "last": "Power", |
| "suffix": "" |
| }, |
| { |
| "first": "Lucy", |
| "middle": [ |
| "Lu" |
| ], |
| "last": "Skjonsberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zheng", |
| "middle": [], |
| "last": "Wilhelm", |
| "suffix": "" |
| }, |
| { |
| "first": "Madeleine", |
| "middle": [], |
| "last": "Yuan", |
| "suffix": "" |
| }, |
| { |
| "first": "Oren", |
| "middle": [], |
| "last": "Van Zuylen", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Etzioni", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Waleed Ammar, Dirk Groeneveld, Chandra Bhagavat- ula, Iz Beltagy, Miles Crawford, Doug Downey, Ja- son Dunkelberger, Ahmed Elgohary, Sergey Feld- man, Vu Ha, Rodney Kinney, Sebastian Kohlmeier, Kyle Lo, Tyler Murray, Hsu-Han Ooi, Matthew E. Peters, Joanna Power, Sam Skjonsberg, Lucy Lu Wang, Chris Wilhelm, Zheng Yuan, Madeleine van Zuylen, and Oren Etzioni. 2018. Construction of the literature graph in semantic scholar. CoRR, abs/1805.02262.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Neural machine translation by jointly learning to align and translate", |
| "authors": [ |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Bengio. 2015. Neural machine translation by jointly learning to align and translate. CoRR, abs/1409.0473.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Scibert: Pretrained contextualized embeddings for scientific text", |
| "authors": [ |
| { |
| "first": "Iz", |
| "middle": [], |
| "last": "Beltagy", |
| "suffix": "" |
| }, |
| { |
| "first": "Arman", |
| "middle": [], |
| "last": "Cohan", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyle", |
| "middle": [], |
| "last": "Lo", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Iz Beltagy, Arman Cohan, and Kyle Lo. 2019. Scibert: Pretrained contextualized embeddings for scientific text. CoRR, abs/1903.10676.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "BERT: pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. BERT: pre-training of deep bidirectional transformers for language under- standing. CoRR, abs/1810.04805.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Discovery of treatments from text corpora", |
| "authors": [ |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Fong", |
| "suffix": "" |
| }, |
| { |
| "first": "Justin", |
| "middle": [], |
| "last": "Grimmer", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1600--1609", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P16-1151" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christian Fong and Justin Grimmer. 2016. Discovery of treatments from text corpora. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1600-1609, Berlin, Germany. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "DeepSentiPeer: Harnessing sentiment in review texts to recommend peer review decisions", |
| "authors": [ |
| { |
| "first": "Tirthankar", |
| "middle": [], |
| "last": "Ghosal", |
| "suffix": "" |
| }, |
| { |
| "first": "Rajeev", |
| "middle": [], |
| "last": "Verma", |
| "suffix": "" |
| }, |
| { |
| "first": "Asif", |
| "middle": [], |
| "last": "Ekbal", |
| "suffix": "" |
| }, |
| { |
| "first": "Pushpak", |
| "middle": [], |
| "last": "Bhattacharyya", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1120--1130", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P19-1106" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tirthankar Ghosal, Rajeev Verma, Asif Ekbal, and Pushpak Bhattacharyya. 2019. DeepSentiPeer: Har- nessing sentiment in review texts to recommend peer review decisions. In Proceedings of the 57th Annual Meeting of the Association for Computational Lin- guistics, pages 1120-1130, Florence, Italy. Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Argument mining for understanding peer reviews", |
| "authors": [ |
| { |
| "first": "Xinyu", |
| "middle": [], |
| "last": "Hua", |
| "suffix": "" |
| }, |
| { |
| "first": "Mitko", |
| "middle": [], |
| "last": "Nikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikhil", |
| "middle": [], |
| "last": "Badugu", |
| "suffix": "" |
| }, |
| { |
| "first": "Lu", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xinyu Hua, Mitko Nikolov, Nikhil Badugu, and Lu Wang. 2019. Argument mining for understand- ing peer reviews. CoRR, abs/1903.10104.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Global explanations of neural networks: Mapping the landscape of predictions", |
| "authors": [ |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Ibrahim", |
| "suffix": "" |
| }, |
| { |
| "first": "Melissa", |
| "middle": [], |
| "last": "Louie", |
| "suffix": "" |
| }, |
| { |
| "first": "Ceena", |
| "middle": [], |
| "last": "Modarres", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [ |
| "W" |
| ], |
| "last": "Paisley", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mark Ibrahim, Melissa Louie, Ceena Modarres, and John W. Paisley. 2019. Global explanations of neu- ral networks: Mapping the landscape of predictions. CoRR, abs/1902.02384.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Attention is not explanation", |
| "authors": [ |
| { |
| "first": "Sarthak", |
| "middle": [], |
| "last": "Jain", |
| "suffix": "" |
| }, |
| { |
| "first": "Byron", |
| "middle": [ |
| "C" |
| ], |
| "last": "Wallace", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sarthak Jain and Byron C. Wallace. 2019. Attention is not explanation.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "A dataset of peer reviews (peerread): Collection, insights and NLP applications", |
| "authors": [ |
| { |
| "first": "Dongyeop", |
| "middle": [], |
| "last": "Kang", |
| "suffix": "" |
| }, |
| { |
| "first": "Waleed", |
| "middle": [], |
| "last": "Ammar", |
| "suffix": "" |
| }, |
| { |
| "first": "Bhavana", |
| "middle": [], |
| "last": "Dalvi", |
| "suffix": "" |
| }, |
| { |
| "first": "Madeleine", |
| "middle": [], |
| "last": "Van Zuylen", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Kohlmeier", |
| "suffix": "" |
| }, |
| { |
| "first": "Eduard", |
| "middle": [ |
| "H" |
| ], |
| "last": "Hovy", |
| "suffix": "" |
| }, |
| { |
| "first": "Roy", |
| "middle": [], |
| "last": "Schwartz", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dongyeop Kang, Waleed Ammar, Bhavana Dalvi, Madeleine van Zuylen, Sebastian Kohlmeier, Ed- uard H. Hovy, and Roy Schwartz. 2018. A dataset of peer reviews (peerread): Collection, insights and NLP applications. CoRR, abs/1804.09635.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Text and causal inference: A review of using text to remove confounding from causal estimates", |
| "authors": [ |
| { |
| "first": "Katherine", |
| "middle": [ |
| "A" |
| ], |
| "last": "Keith", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Jensen", |
| "suffix": "" |
| }, |
| { |
| "first": "Brendan O'", |
| "middle": [], |
| "last": "Connor", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Katherine A. Keith, David Jensen, and Brendan O'Connor. 2020. Text and causal inference: A review of using text to remove confounding from causal estimates. CoRR, abs/2005.00649.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Interpretability beyond feature attribution: Quantitative testing with concept activation vectors (tcav)", |
| "authors": [ |
| { |
| "first": "Been", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Wattenberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Justin", |
| "middle": [], |
| "last": "Gilmer", |
| "suffix": "" |
| }, |
| { |
| "first": "Carrie", |
| "middle": [], |
| "last": "Cai", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Wexler", |
| "suffix": "" |
| }, |
| { |
| "first": "Fernanda", |
| "middle": [], |
| "last": "Viegas", |
| "suffix": "" |
| }, |
| { |
| "first": "Rory", |
| "middle": [], |
| "last": "Sayres", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Been Kim, Martin Wattenberg, Justin Gilmer, Car- rie Cai, James Wexler, Fernanda Viegas, and Rory Sayres. 2017. Interpretability beyond feature attri- bution: Quantitative testing with concept activation vectors (tcav).", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Adam: A method for stochastic optimization", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Diederik", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik P. Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Editorial peer reviewers' recommendations at a general medical journal: are they reliable and do editors care?", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Richard", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Kravitz", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Franks", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Mitchell", |
| "suffix": "" |
| }, |
| { |
| "first": "Martha", |
| "middle": [], |
| "last": "Feldman", |
| "suffix": "" |
| }, |
| { |
| "first": "Cindy", |
| "middle": [], |
| "last": "Gerrity", |
| "suffix": "" |
| }, |
| { |
| "first": "William M", |
| "middle": [], |
| "last": "Byrne", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Tierney", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "PLoS One", |
| "volume": "5", |
| "issue": "4", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Richard L Kravitz, Peter Franks, Mitchell D Feldman, Martha Gerrity, Cindy Byrne, and William M Tier- ney. 2010. Editorial peer reviewers' recommenda- tions at a general medical journal: are they reliable and do editors care? PLoS One, 5(4):e10072.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "The arbitrariness of reviews, and advice for school administrators", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Langford", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Guzdial", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Commun. ACM", |
| "volume": "58", |
| "issue": "4", |
| "pages": "12--13", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/2732417" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "John Langford and Mark Guzdial. 2015. The arbitrari- ness of reviews, and advice for school administra- tors. Commun. ACM, 58(4):12-13.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "The mythos of model interpretability", |
| "authors": [ |
| { |
| "first": "Zachary", |
| "middle": [ |
| "C" |
| ], |
| "last": "Lipton", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zachary C. Lipton. 2016. The mythos of model inter- pretability.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Explainable AI for trees: From local explanations to global understanding", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Scott", |
| "suffix": "" |
| }, |
| { |
| "first": "Gabriel", |
| "middle": [ |
| "G" |
| ], |
| "last": "Lundberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Hugh", |
| "middle": [], |
| "last": "Erion", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Jordan", |
| "middle": [ |
| "M" |
| ], |
| "last": "Degrave", |
| "suffix": "" |
| }, |
| { |
| "first": "Bala", |
| "middle": [], |
| "last": "Prutkin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ronit", |
| "middle": [], |
| "last": "Nair", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "Katz", |
| "suffix": "" |
| }, |
| { |
| "first": "Nisha", |
| "middle": [], |
| "last": "Himmelfarb", |
| "suffix": "" |
| }, |
| { |
| "first": "Su-In", |
| "middle": [], |
| "last": "Bansal", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Scott M. Lundberg, Gabriel G. Erion, Hugh Chen, Alex DeGrave, Jordan M. Prutkin, Bala Nair, Ronit Katz, Jonathan Himmelfarb, Nisha Bansal, and Su- In Lee. 2019. Explainable AI for trees: From lo- cal explanations to global understanding. CoRR, abs/1905.04610.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "A unified approach to interpreting model predictions", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Scott", |
| "suffix": "" |
| }, |
| { |
| "first": "Su-In", |
| "middle": [], |
| "last": "Lundberg", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "30", |
| "issue": "", |
| "pages": "4765--4774", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Scott M Lundberg and Su-In Lee. 2017. A uni- fied approach to interpreting model predictions. In I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett, editors, Advances in Neural Information Processing Systems 30, pages 4765-4774. Curran Associates, Inc.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Regression modelling and other methods to control confounding", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mcnamee", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Occupational and Environmental Medicine", |
| "volume": "62", |
| "issue": "7", |
| "pages": "500--506", |
| "other_ids": { |
| "DOI": [ |
| "10.1136/oem.2002.001115" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "R McNamee. 2005. Regression modelling and other methods to control confounding. Occupational and Environmental Medicine, 62(7):500-506.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Interpretable Machine Learning", |
| "authors": [ |
| { |
| "first": "Christoph", |
| "middle": [], |
| "last": "Molnar", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christoph Molnar. 2019. Interpretable Machine Learning. https://christophm.github.io/ interpretable-ml-book/.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Pitfalls to avoid when interpreting machine learning models", |
| "authors": [ |
| { |
| "first": "Christoph", |
| "middle": [], |
| "last": "Molnar", |
| "suffix": "" |
| }, |
| { |
| "first": "Gunnar", |
| "middle": [], |
| "last": "K\u00f6nig", |
| "suffix": "" |
| }, |
| { |
| "first": "Julia", |
| "middle": [], |
| "last": "Herbinger", |
| "suffix": "" |
| }, |
| { |
| "first": "Timo", |
| "middle": [], |
| "last": "Freiesleben", |
| "suffix": "" |
| }, |
| { |
| "first": "Susanne", |
| "middle": [], |
| "last": "Dandl", |
| "suffix": "" |
| }, |
| { |
| "first": "Christian", |
| "middle": [ |
| "A" |
| ], |
| "last": "Scholbeck", |
| "suffix": "" |
| }, |
| { |
| "first": "Giuseppe", |
| "middle": [], |
| "last": "Casalicchio", |
| "suffix": "" |
| }, |
| { |
| "first": "Moritz", |
| "middle": [], |
| "last": "Grosse-Wentrup", |
| "suffix": "" |
| }, |
| { |
| "first": "Bernd", |
| "middle": [], |
| "last": "Bischl", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christoph Molnar, Gunnar K\u00f6nig, Julia Herbinger, Timo Freiesleben, Susanne Dandl, Christian A. Scholbeck, Giuseppe Casalicchio, Moritz Grosse-Wentrup, and Bernd Bischl. 2020. Pitfalls to avoid when interpreting machine learning models.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Definitions, methods, and applications in interpretable machine learning", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Murdoch", |
| "suffix": "" |
| }, |
| { |
| "first": "Chandan", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "Karl", |
| "middle": [], |
| "last": "Kumbier", |
| "suffix": "" |
| }, |
| { |
| "first": "Reza", |
| "middle": [], |
| "last": "Abbasi-Asl", |
| "suffix": "" |
| }, |
| { |
| "first": "Bin", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the National Academy of Sciences", |
| "volume": "116", |
| "issue": "", |
| "pages": "22071--22080", |
| "other_ids": { |
| "DOI": [ |
| "10.1073/pnas.1900654116" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "W. James Murdoch, Chandan Singh, Karl Kumbier, Reza Abbasi-Asl, and Bin Yu. 2019. Definitions, meth- ods, and applications in interpretable machine learn- ing. Proceedings of the National Academy of Sciences, 116(44):22071-22080.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Elements of causal inference", |
| "authors": [ |
| { |
| "first": "Jonas", |
| "middle": [], |
| "last": "Peters", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jonas Peters. 2017. Elements of causal inference.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Interpretable neural architectures for attributing an ad's performance to its writing style", |
| "authors": [ |
| { |
| "first": "Reid", |
| "middle": [], |
| "last": "Pryzant", |
| "suffix": "" |
| }, |
| { |
| "first": "Sugato", |
| "middle": [], |
| "last": "Basu", |
| "suffix": "" |
| }, |
| { |
| "first": "Kazoo", |
| "middle": [], |
| "last": "Sone", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 EMNLP Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "125--135", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W18-5415" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Reid Pryzant, Sugato Basu, and Kazoo Sone. 2018a. Inter- pretable neural architectures for attributing an ad's perfor- mance to its writing style. In Proceedings of the 2018 EMNLP Workshop BlackboxNLP: Analyzing and Interpret- ing Neural Networks for NLP, pages 125-135, Brussels, Belgium. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Predicting sales from the language of product descriptions", |
| "authors": [ |
| { |
| "first": "Reid", |
| "middle": [], |
| "last": "Pryzant", |
| "suffix": "" |
| }, |
| { |
| "first": "Youngjoo", |
| "middle": [], |
| "last": "Chung", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "eCOM@SIGIR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Reid Pryzant, Youngjoo Chung, and Dan Jurafsky. 2017. Pre- dicting sales from the language of product descriptions. In eCOM@SIGIR.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Deconfounded lexicon induction for interpretable social science", |
| "authors": [ |
| { |
| "first": "Reid", |
| "middle": [], |
| "last": "Pryzant", |
| "suffix": "" |
| }, |
| { |
| "first": "Kelly", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Stefan", |
| "middle": [], |
| "last": "Wagner", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "1615--1625", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N18-1146" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Reid Pryzant, Kelly Shen, Dan Jurafsky, and Stefan Wagner. 2018b. Deconfounded lexicon induction for interpretable social science. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Com- putational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pages 1615-1625, New Orleans, Louisiana. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "\"why should i trust you?\": Explaining the predictions of any classifier", |
| "authors": [ |
| { |
| "first": "Marco Tulio", |
| "middle": [], |
| "last": "Ribeiro", |
| "suffix": "" |
| }, |
| { |
| "first": "Sameer", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "Carlos", |
| "middle": [], |
| "last": "Guestrin", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, KDD '16", |
| "volume": "", |
| "issue": "", |
| "pages": "1135--1144", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/2939672.2939778" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marco Tulio Ribeiro, Sameer Singh, and Carlos Guestrin. 2016. \"why should i trust you?\": Explaining the predic- tions of any classifier. In Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discov- ery and Data Mining, KDD '16, page 1135-1144, New York, NY, USA. Association for Computing Machinery.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Adjusting for confounding with text matching", |
| "authors": [ |
| { |
| "first": "Margaret", |
| "middle": [ |
| "E" |
| ], |
| "last": "Roberts", |
| "suffix": "" |
| }, |
| { |
| "first": "Brandon", |
| "middle": [ |
| "M" |
| ], |
| "last": "Stewart", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [ |
| "A" |
| ], |
| "last": "Nielsen", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "American Journal of Political Science", |
| "volume": "64", |
| "issue": "4", |
| "pages": "887--903", |
| "other_ids": { |
| "DOI": [ |
| "10.1111/ajps.12526" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Margaret E. Roberts, Brandon M. Stewart, and Richard A. Nielsen. 2020. Adjusting for confounding with text match- ing. American Journal of Political Science, 64(4):887-903.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Stop explaining black box machine learning models for high stakes decisions and use interpretable models instead", |
| "authors": [ |
| { |
| "first": "Cynthia", |
| "middle": [], |
| "last": "Rudin", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Cynthia Rudin. 2018. Stop explaining black box machine learning models for high stakes decisions and use inter- pretable models instead.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "A social media study on the effects of psychiatric medication use", |
| "authors": [ |
| { |
| "first": "Koustuv", |
| "middle": [], |
| "last": "Saha", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Sugar", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Torous", |
| "suffix": "" |
| }, |
| { |
| "first": "Bruno", |
| "middle": [], |
| "last": "Abrahao", |
| "suffix": "" |
| }, |
| { |
| "first": "Emre", |
| "middle": [], |
| "last": "K\u0131c\u0131man", |
| "suffix": "" |
| }, |
| { |
| "first": "Munmun De", |
| "middle": [], |
| "last": "Choudhury", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the International AAAI Conference on Web and Social Media", |
| "volume": "13", |
| "issue": "", |
| "pages": "440--451", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Koustuv Saha, Benjamin Sugar, John Torous, Bruno Abrahao, Emre K\u0131c\u0131man, and Munmun De Choudhury. 2019. A social media study on the effects of psychiatric medication use. Proceedings of the International AAAI Conference on Web and Social Media, 13(01):440-451.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Causality for machine learning", |
| "authors": [ |
| { |
| "first": "Bernhard", |
| "middle": [], |
| "last": "Sch\u00f6lkopf", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bernhard Sch\u00f6lkopf. 2019. Causality for machine learning.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Uncertainty-Aware Machine Support for Paper Reviewing on the Interspeech 2019 Submission Corpus", |
| "authors": [ |
| { |
| "first": "Lukas", |
| "middle": [], |
| "last": "Stappen", |
| "suffix": "" |
| }, |
| { |
| "first": "Georgios", |
| "middle": [], |
| "last": "Rizos", |
| "suffix": "" |
| }, |
| { |
| "first": "Madina", |
| "middle": [], |
| "last": "Hasan", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Hain", |
| "suffix": "" |
| }, |
| { |
| "first": "Bj\u00f6rn", |
| "middle": [ |
| "W" |
| ], |
| "last": "Schuller", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proc. Interspeech 2020", |
| "volume": "", |
| "issue": "", |
| "pages": "1808--1812", |
| "other_ids": { |
| "DOI": [ |
| "10.21437/Interspeech.2020-2862" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lukas Stappen, Georgios Rizos, Madina Hasan, Thomas Hain, and Bj\u00f6rn W. Schuller. 2020. Uncertainty-Aware Machine Support for Paper Reviewing on the Interspeech 2019 Sub- mission Corpus. In Proc. Interspeech 2020, pages 1808- 1812.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Reviewer bias in single-versus double-blind peer review", |
| "authors": [ |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Tomkins", |
| "suffix": "" |
| }, |
| { |
| "first": "Min", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "William", |
| "middle": [ |
| "D" |
| ], |
| "last": "Heavlin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the National Academy of Sciences", |
| "volume": "114", |
| "issue": "48", |
| "pages": "12708--12713", |
| "other_ids": { |
| "DOI": [ |
| "10.1073/pnas.1707323114" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrew Tomkins, Min Zhang, and William D. Heavlin. 2017. Reviewer bias in single-versus double-blind peer re- view. Proceedings of the National Academy of Sciences, 114(48):12708-12713.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Using text embeddings for causal inference", |
| "authors": [ |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Veitch", |
| "suffix": "" |
| }, |
| { |
| "first": "Dhanya", |
| "middle": [], |
| "last": "Sridhar", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "M" |
| ], |
| "last": "Blei", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Victor Veitch, Dhanya Sridhar, and David M. Blei. 2019. Using text embeddings for causal inference. CoRR, abs/1905.12741.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Sentiment analysis of peer review texts for scholarly papers", |
| "authors": [ |
| { |
| "first": "Ke", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaojun", |
| "middle": [], |
| "last": "Wan", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "The 41st International ACM SIGIR Conference on Research & Development in Information Retrieval, SIGIR '18", |
| "volume": "", |
| "issue": "", |
| "pages": "175--184", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/3209978.3210056" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ke Wang and Xiaojun Wan. 2018. Sentiment analysis of peer review texts for scholarly papers. In The 41st International ACM SIGIR Conference on Research & Development in Information Retrieval, SIGIR '18, page 175-184, New York, NY, USA. Association for Computing Machinery.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Attention is not not explanation", |
| "authors": [ |
| { |
| "first": "Sarah", |
| "middle": [], |
| "last": "Wiegreffe", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuval", |
| "middle": [], |
| "last": "Pinter", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sarah Wiegreffe and Yuval Pinter. 2019. Attention is not not explanation.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "Graphical model depicting the confounding effect: C -Confounder, T -Treatment, Y -Outcome.", |
| "uris": null, |
| "type_str": "figure", |
| "num": null |
| }, |
| "FIGREF1": { |
| "text": "Deep Residualisation Architecture", |
| "uris": null, |
| "type_str": "figure", |
| "num": null |
| }, |
| "FIGREF2": { |
| "text": "Histogram of the number of input tokens per peer review.", |
| "uris": null, |
| "type_str": "figure", |
| "num": null |
| }, |
| "FIGREF3": { |
| "text": "Histogram of the number of input tokens per meta-review.", |
| "uris": null, |
| "type_str": "figure", |
| "num": null |
| }, |
| "FIGREF4": { |
| "text": "", |
| "uris": null, |
| "type_str": "figure", |
| "num": null |
| }, |
| "TABREF0": { |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td>Model Type</td><td>Model</td><td colspan=\"3\">Accuracy Macro-F1 Weighted-F1</td></tr><tr><td/><td>Majority Baseline</td><td>59.52</td><td>0.3731</td><td>0.4442</td></tr><tr><td>Baselines</td><td>PeerRead (only paper) DeepSentiPeer (paper & review)</td><td>65.30 71.05</td><td>N/A N/A</td><td>N/A N/A</td></tr><tr><td>Meta-Review</td><td>SciBERT GRU</td><td>89.47 86.84</td><td>0.8899 0.8661</td><td>0.8947 0.8698</td></tr><tr><td>Final Decision Peer Review</td><td>SciBERT</td><td>86.84</td><td>0.8606</td><td>0.8676</td></tr><tr><td>Table 1: Model Type</td><td>Model</td><td colspan=\"3\">Accuracy Macro-F1 Weighted-F1</td></tr><tr><td>Baselines</td><td>Majority Baseline Logistic Regression</td><td>50.76 58.33</td><td>0.3367 0.5725</td><td>0.3418 0.5756</td></tr><tr><td colspan=\"2\">Individual Peer Review SciBERT GRU</td><td>80.30 70.45</td><td>0.8026 0.7007</td><td>0.8028 0.7012</td></tr></table>", |
| "text": "Performance of the classifiers that predict the final decision prediction (i.e., whether a paper is accepted for publication on ICLR 2017) for a paper on the test set of PeerRead. We see that using Meta-reviews in training yields an advantage, as does peer review fusion.", |
| "num": null |
| }, |
| "TABREF1": { |
| "html": null, |
| "type_str": "table", |
| "content": "<table/>", |
| "text": "Performance on the Individual Peer Review Prediction (i.e., whether a reviewer suggests that a paper is accepted for publication), using on the test set of PeerRead.", |
| "num": null |
| }, |
| "TABREF3": { |
| "html": null, |
| "type_str": "table", |
| "content": "<table/>", |
| "text": "Performance of the Deconfounded lexicons. All lexicons have a size of 50 words. For each type of classifier, we present the Informativeness Coefficient both of the non-causal lexicon and of the deconfounded lexicon, generated through Deep Residualisation (DR).", |
| "num": null |
| }, |
| "TABREF5": { |
| "html": null, |
| "type_str": "table", |
| "content": "<table/>", |
| "text": "Description of ICLR 2017 section of Peer-Read.", |
| "num": null |
| }, |
| "TABREF7": { |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td>BERT -Meta-Review</td><td>GRU -Meta-Review</td><td>BERT -Individual Peer Review</td><td>GRU -Individual Peer Review</td><td>BERT -Final Decision Peer Review</td></tr><tr><td>enjoyed</td><td>positively</td><td>interspeech</td><td>not</td><td>kolter</td></tr><tr><td>cnns</td><td>accept</td><td>zhang</td><td>attempt</td><td>aaai</td></tr><tr><td>resubmit</td><td>suspiciously</td><td>prons</td><td colspan=\"2\">comprehensive not</td></tr><tr><td>community</td><td>wrong</td><td>dismissed</td><td>interfere</td><td>third</td></tr><tr><td>limited</td><td>yet</td><td>third</td><td>variational</td><td>efficiently</td></tr><tr><td colspan=\"2\">experimentation perhaps</td><td>p6</td><td>entirely</td><td>untrue</td></tr><tr><td>3</td><td>mix</td><td>confusing</td><td colspan=\"2\">systematically cifar10</td></tr><tr><td>workshop</td><td>limit</td><td>resulting</td><td>perfect</td><td>venue</td></tr><tr><td>poster</td><td>guidance</td><td>geometry</td><td>sure</td><td>recovered</td></tr><tr><td>topic</td><td>read</td><td colspan=\"2\">unconvincing 
lda</td><td>huffman</td></tr><tr><td>variational</td><td>impressive</td><td>honestly</td><td>formalize</td><td>riemannian</td></tr><tr><td>limit</td><td>surprise</td><td>not</td><td>tolerant</td><td>cifar</td></tr><tr><td>although</td><td>enough</td><td>community</td><td>valid</td><td>arxiv</td></tr><tr><td>not</td><td>incremental</td><td>cannot</td><td>belief</td><td>accompanying</td></tr><tr><td>sound</td><td>sufficiently</td><td>superficial</td><td>gan</td><td>extended</td></tr><tr><td>none</td><td>timely</td><td>readership</td><td>imposible</td><td>convex</td></tr><tr><td>accepted</td><td>journal</td><td>big</td><td>adagrad</td><td>github</td></tr><tr><td>dropout</td><td>post</td><td>suggestion</td><td>semantically</td><td>trace</td></tr><tr><td>imagenet</td><td>contribute</td><td>dialogue</td><td>aware</td><td>eigenspectra</td></tr><tr><td>agent</td><td>ready</td><td>revisit</td><td>clear</td><td>encourage</td></tr><tr><td>justify</td><td>not</td><td>bag</td><td>carefully</td><td>code</td></tr><tr><td>generative</td><td>variational</td><td>analysed</td><td>j.</td><td>asynchronous</td></tr><tr><td>advantage</td><td>nicely</td><td>icml</td><td>and/or</td><td>crucial</td></tr><tr><td>unfortunately</td><td>receive</td><td>taken</td><td>arguably</td><td>auxiliary</td></tr><tr><td>video</td><td>submit</td><td>typo</td><td>fully</td><td>investigated</td></tr><tr><td>review</td><td>relevant</td><td>submitted</td><td>offer</td><td>iclrw2016</td></tr><tr><td>solver</td><td>much</td><td>energy</td><td>vs.</td><td>hyperparameters</td></tr><tr><td>three</td><td>high</td><td>nice</td><td>confident</td><td>discus</td></tr><tr><td>task</td><td>weak</td><td>spirit</td><td>message</td><td>dcgan</td></tr><tr><td>use</td><td>good</td><td>competitive</td><td>object</td><td>interesting</td></tr><tr><td>confident</td><td>write</td><td>per</td><td>receive</td><td>nice</td></tr><tr><td>generalisation</td><td>whilst</td><td>highlighted</td><td>probably</td><td>game</td></tr><tr><td>supe
rvised</td><td>into</td><td colspan=\"3\">multimodality multimodality demonstration</td></tr><tr><td>promise</td><td>below</td><td>far</td><td>strictly</td><td>lample</td></tr><tr><td>raised</td><td>benefit</td><td>04562</td><td>directly</td><td>the</td></tr><tr><td>understanding</td><td>rebuttal</td><td>lack</td><td>twitter</td><td>significance</td></tr><tr><td>timely</td><td>generative</td><td>preprint</td><td>join</td><td>seq2seq</td></tr><tr><td>reject</td><td>strengthen</td><td>dcgan</td><td>e.g.</td><td>community</td></tr><tr><td>unsatisfactory</td><td>meet</td><td>conduct</td><td>observe</td><td>perspective</td></tr><tr><td>yet</td><td>variety</td><td>word2vec</td><td>complete</td><td>paper</td></tr><tr><td>mixed</td><td>give</td><td>wen</td><td>explain</td><td>thanks</td></tr><tr><td>shot</td><td>hide</td><td>gan</td><td>novelty</td><td>investigates</td></tr><tr><td>propose</td><td>recommend</td><td>rejected</td><td>dualnets</td><td>insightful</td></tr><tr><td>how</td><td>badly</td><td>start</td><td>convince</td><td>uploaded</td></tr><tr><td>rejection</td><td>long</td><td>towards</td><td>handle</td><td>narrow</td></tr><tr><td>thus</td><td>clear</td><td colspan=\"2\">multiplication nintendo</td><td>match</td></tr><tr><td>salimans</td><td>convince</td><td colspan=\"2\">generalisation theorem</td><td>seed</td></tr><tr><td>provided</td><td>gain</td><td>auxiliary</td><td>satisfy</td><td>tsp</td></tr><tr><td>goal</td><td>serious</td><td>parametric</td><td>ready</td><td>adversarial</td></tr><tr><td>reservation</td><td>maintain</td><td>enjoyed</td><td>demand</td><td>isn</td></tr></table>", |
| "text": "Final Decision Classifier Confusion Matrices on the test set of PeerRead(Kang et al., 2018)", |
| "num": null |
| }, |
| "TABREF8": { |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td colspan=\"2\">GRU (Non-Causal) GRU (DR+ATTN)</td></tr><tr><td>positively</td><td>nicely</td></tr><tr><td>accept</td><td>overcomplicated</td></tr><tr><td>suspiciously</td><td>positively</td></tr><tr><td>wrong</td><td>yet</td></tr><tr><td>yet</td><td>rebuttal</td></tr><tr><td>perhaps</td><td>ready</td></tr><tr><td>mix</td><td>sufficiently</td></tr><tr><td>limit</td><td>properly</td></tr><tr><td>guidance</td><td>acceptance</td></tr><tr><td>read</td><td>surprise</td></tr><tr><td>impressive</td><td>usual</td></tr><tr><td>surprise</td><td>into</td></tr><tr><td>enough</td><td>describe</td></tr><tr><td>incremental</td><td>finding</td></tr><tr><td>sufficiently</td><td>organize</td></tr><tr><td>timely</td><td>execution</td></tr><tr><td>journal</td><td>insight</td></tr><tr><td>post</td><td>receive</td></tr><tr><td>contribute</td><td>criterion</td></tr><tr><td>ready</td><td>overall</td></tr><tr><td>not</td><td>good</td></tr><tr><td>variational</td><td>closely</td></tr><tr><td>nicely</td><td>wise</td></tr><tr><td>receive</td><td>suspiciously</td></tr><tr><td>submit</td><td>well</td></tr><tr><td>relevant</td><td>badly</td></tr><tr><td>much</td><td>write</td></tr><tr><td>high</td><td>pair</td></tr><tr><td>weak</td><td>explain</td></tr><tr><td>good</td><td>not</td></tr><tr><td>write</td><td>simple</td></tr><tr><td>whilst</td><td>weak</td></tr><tr><td>into</td><td>asset</td></tr><tr><td>below</td><td>great</td></tr><tr><td>benefit</td><td>out</td></tr><tr><td>rebuttal</td><td>execute</td></tr><tr><td>generative</td><td>helpful</td></tr><tr><td>strengthen</td><td>enjoy</td></tr><tr><td>meet</td><td>conditional</td></tr><tr><td>variety</td><td>mix</td></tr><tr><td>give</td><td>entirely</td></tr><tr><td>hide</td><td>contribution</td></tr><tr><td>recommend</td><td>world</td></tr><tr><td>badly</td><td>demonstrate</td></tr><tr><td>long</td><td>accept</td></tr><tr><td>clear</td><td>assume</td></tr><tr><td>convince</td><td>post</td></tr><tr><td>gain</td><td>publ
ication</td></tr><tr><td>serious</td><td>effective</td></tr><tr><td>maintain</td><td>clearly</td></tr></table>", |
| "text": "Top 50 salient words for non-causal models.", |
| "num": null |
| }, |
| "TABREF9": { |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td>interspeech</td><td>interspeech</td><td>not</td><td>identical</td></tr><tr><td>zhang</td><td>dismissed</td><td>attempt</td><td>sure</td></tr><tr><td>prons</td><td>prons</td><td>comprehensive</td><td>premise</td></tr><tr><td>dismissed</td><td>geometry</td><td>interfere</td><td>carefully</td></tr><tr><td>third</td><td>p6</td><td>variational</td><td>heavy</td></tr><tr><td>p6</td><td>submitted</td><td>entirely</td><td>specify</td></tr><tr><td>confusing</td><td>unfair</td><td>systematically</td><td>tease</td></tr><tr><td>resulting</td><td>unconvincing</td><td>perfect</td><td>interfere</td></tr><tr><td>geometry</td><td>readership</td><td>sure</td><td>necessarily</td></tr><tr><td>unconvincing</td><td>honestly</td><td>lda</td><td>adagrad</td></tr><tr><td>honestly</td><td>not</td><td>formalize</td><td>attempt</td></tr><tr><td>not</td><td>spirit</td><td>tolerant</td><td>novelty</td></tr><tr><td>community</td><td>confusing</td><td>valid</td><td>fully</td></tr><tr><td>cannot</td><td>analysed</td><td>belief</td><td>repeat</td></tr><tr><td>superficial</td><td>bag</td><td>gan</td><td>systematically</td></tr><tr><td>readership</td><td>unable</td><td>imposible</td><td>not</td></tr><tr><td>big</td><td>cannot</td><td>adagrad</td><td>gray</td></tr><tr><td>suggestion</td><td>rejected</td><td>semantically</td><td>theoretically</td></tr><tr><td>dialogue</td><td>insightful</td><td>aware</td><td>anymore</td></tr><tr><td>revisit</td><td>multiplication</td><td>clear</td><td>role</td></tr><tr><td>bag</td><td>highlighted</td><td>carefully</td><td>belief</td></tr><tr><td>analysed</td><td>enjoyed</td><td>j.</td><td>recommendation</td></tr><tr><td>icml</td><td>misleading</td><td>and/or</td><td>almost</td></tr><tr><td>taken</td><td>disagree</td><td>arguably</td><td>consistently</td></tr><tr><td>typo</td><td>dialogue</td><td>fully</td><td>primary</td></tr><tr><td>submitted</td><td>mu_</td><td>offer</td><td>subtraction</td></tr><tr><td>energy</td><td>wen</td><td>vs.</td><
td>good</td></tr><tr><td>nice</td><td>lack</td><td>confident</td><td>satisfactory</td></tr><tr><td>spirit</td><td>nice</td><td>message</td><td>background</td></tr><tr><td>competitive</td><td>multimodality</td><td>object</td><td>write</td></tr><tr><td>per</td><td>welcome</td><td>receive</td><td>compute</td></tr><tr><td>highlighted</td><td>conduct</td><td>probably</td><td>easy</td></tr><tr><td>multimodality</td><td>recommender</td><td>multimodality</td><td>teach</td></tr><tr><td>far</td><td>encourage</td><td>strictly</td><td>enough</td></tr><tr><td>04562</td><td>dualnets</td><td>directly</td><td>convince</td></tr><tr><td>lack</td><td>thanks</td><td>twitter</td><td>except</td></tr><tr><td>preprint</td><td>cdl</td><td>join</td><td>ready</td></tr><tr><td>dcgan</td><td>preprint</td><td>e.g.</td><td>explain</td></tr><tr><td>conduct</td><td>04562</td><td>observe</td><td>usage</td></tr><tr><td>word2vec</td><td>enjoy</td><td>complete</td><td>sell</td></tr><tr><td>wen</td><td>revisit</td><td>explain</td><td>unfortunately</td></tr><tr><td>gan</td><td>community</td><td>novelty</td><td>arguably</td></tr><tr><td>rejected</td><td>appreciate</td><td>dualnets</td><td>yet</td></tr><tr><td>start</td><td>principled</td><td>convince</td><td>especially</td></tr><tr><td>towards</td><td>medical</td><td>handle</td><td>elegantly</td></tr><tr><td>multiplication</td><td>coding</td><td>nintendo</td><td>sufficiently</td></tr><tr><td>generalisation</td><td>drnn</td><td>theorem</td><td>handle</td></tr><tr><td>auxiliary</td><td>accompanying</td><td>satisfy</td><td>cdl</td></tr><tr><td>parametric</td><td>factorization</td><td>ready</td><td>setup</td></tr><tr><td>enjoyed</td><td>jmlr</td><td>demand</td><td>resception</td></tr></table>", |
| "text": "Top 50 salient words for meta-reviews. The first column contains the explanations of the non-causal models. The second column contains the deconfounded lexicon.BERT (Non-Causal) BERT (DR+LIME) GRU (Non-Causal) GRU(DR+ATTN)", |
| "num": null |
| } |
| } |
| } |
| } |