| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T01:10:36.481526Z" |
| }, |
| "title": "Multiple Instance Learning for Content Feedback Localization without Annotation", |
| "authors": [ |
| { |
| "first": "Scott", |
| "middle": [], |
| "last": "Hellman", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "scott.hellman@pearson.com" |
| }, |
| { |
| "first": "William", |
| "middle": [ |
| "R" |
| ], |
| "last": "Murray", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "william.murray@pearson.com" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Wiemerslage", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "adam.wiemerslage@pearson.com" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Rosenstein", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "mark.rosenstein@pearson.com" |
| }, |
| { |
| "first": "Peter", |
| "middle": [ |
| "W" |
| ], |
| "last": "Foltz", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "peter.foltz@pearson.com" |
| }, |
| { |
| "first": "Lee", |
| "middle": [], |
| "last": "Becker", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "lee.becker@pearson.com" |
| }, |
| { |
| "first": "Marcia", |
| "middle": [], |
| "last": "Derr", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "marcia.derr@pearson.com" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Automated Essay Scoring (AES) can be used to automatically generate holistic scores with reliability comparable to human scoring. In addition, AES systems can provide formative feedback to learners, typically at the essay level. In contrast, we are interested in providing feedback specialized to the content of the essay, and specifically for the content areas required by the rubric. A key objective is that the feedback should be localized alongside the relevant essay text. An important step in this process is determining where in the essay the rubric designated points and topics are discussed. A natural approach to this task is to train a classifier using manually annotated data; however, collecting such data is extremely resource intensive. Instead, we propose a method to predict these annotation spans without requiring any labeled annotation data. Our approach is to consider AES as a Multiple Instance Learning (MIL) task. We show that such models can both predict content scores and localize content by leveraging their sentence-level score predictions. This capability arises despite never having access to annotation training data. Implications are discussed for improving formative feedback and explainable AES models.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Automated Essay Scoring (AES) can be used to automatically generate holistic scores with reliability comparable to human scoring. In addition, AES systems can provide formative feedback to learners, typically at the essay level. In contrast, we are interested in providing feedback specialized to the content of the essay, and specifically for the content areas required by the rubric. A key objective is that the feedback should be localized alongside the relevant essay text. An important step in this process is determining where in the essay the rubric designated points and topics are discussed. A natural approach to this task is to train a classifier using manually annotated data; however, collecting such data is extremely resource intensive. Instead, we propose a method to predict these annotation spans without requiring any labeled annotation data. Our approach is to consider AES as a Multiple Instance Learning (MIL) task. We show that such models can both predict content scores and localize content by leveraging their sentence-level score predictions. This capability arises despite never having access to annotation training data. Implications are discussed for improving formative feedback and explainable AES models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The assessment of writing is an integral component in the pedagogical use of constructed response items. Often, a student's response is scored according to a rubric that specifies the components of writing to be assessed -such as content, grammar, and organization -and establishes an ordinal scale to assign a score for each of those components. Furthermore, there is strong evidence of learning improvements when instructors provide feedback to their students (Graham et al., 2011) . Their comments can take the form of holistic, document-level feedback, or more specific, targeted feedback that addresses an error or praises an insight at relevant locations in the paper.", |
| "cite_spans": [ |
| { |
| "start": 462, |
| "end": 483, |
| "text": "(Graham et al., 2011)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "As far back as the 1960s, computers have been employed in essay scoring (Page, 1966) . Thus, automated essay scoring (AES) is a well-studied area, and with modern approaches, AES systems are often as reliable as human scorers (Shermis and Burstein, , 2013 . However, many of these systems are limited to providing holistic scores -that is, they assign an ordinal value for every component in the rubric.", |
| "cite_spans": [ |
| { |
| "start": 72, |
| "end": 84, |
| "text": "(Page, 1966)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 239, |
| "end": 255, |
| "text": "Burstein, , 2013", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Furthermore, some AES systems can provide document-level feedback, but this requires students to interpret which parts of their text the feedback refers to. When an automated scoring system additionally provides location information, students can leverage a more specific frame of reference to better understand the feedback. Indeed, students are more likely to understand and implement revisions when given feedback that summarizes and localizes relevant information (Patchan et al., 2016) .", |
| "cite_spans": [ |
| { |
| "start": 468, |
| "end": 490, |
| "text": "(Patchan et al., 2016)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We are interested in automatically providing localized feedback on the content of an essay. The specific kinds of feedback provided can vary, ranging from positive feedback reinforcing that a student correctly covered a specific topic, to feedback indicating areas that the student could improve. This latter category includes errors such as domain misconceptions or inadequate citations. We consider wholly omitted topics to be outside the scope of localized feedback, as they represent an overall issue in the essay that is best addressed by essaylevel feedback.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "From a machine learning perspective, content localization is difficult. Current automated localization is often very fine-grained, e.g., grammar checkers can identify spelling or grammar mistakes at the word level. However, we view the content of a student's essay as primarily a sentence-level aspect of student writing. Critically, to provide this type of content feedback, we need to be able to detect where in their essay a student is discussing that particular content. One approach would be to collect a corpus of training data containing essays with annotations indicating text spans where topics of interest were discussed. A supervised machine learning classifier could be trained on this data, and this localization model could then be integrated into a full AES feedback system. For example, a scoring model could identify the degree of coverage of rubric-required topics t 1 , . . . , t n . A formative feedback system could generate suggestions for inadequately covered topics. Finally, the localization system could identify where this formative feedback should be presented. In this work, we address the localization part of this process.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "While AES systems typically provide scoring of several rubric traits, we are interested primarily in the details of an essay's content, and so our work here focuses on a detailed breakdown of content coverage into individual topics. For example, consider a prompt that asks students to discuss how to construct a scientific study on the benefits of aromatherapy. Each student answer is a short essay, and is scored on its coverage of six content topics. Examples of these topics include discussion of independent and dependent variables, defining a blind study, and discussing the difficulties in designing a blind study for aromatherapy. These kinds of content topics are what our localization efforts are focused on. Figure 1 shows a a screenshot from an annotation tool containing an example essay with human-provided annotations and scores.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 719, |
| "end": 727, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The downside of building a localization classifier based on annotation data is that such annotation data is very expensive to collect. Holistic scoring data itself is expensive to collect, and obtaining reliable annotations is even more difficult to orchestrate. Due to these issues, an approach that eliminates annotation training data is desirable. We propose a weakly-supervised multiple instance learning (MIL) approach to content localization, that relies on either document-level scoring information, or on a set of manually curated reference sentences. We show that both approaches can perform well at the topic localization task, without having been trained on localization data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Automated Essay Scoring systems for providing holistic scoring are well studied (Shermis and Burstein, , 2013 . Some systems are specifically designed to provide formative feedback, with or without an accompanying overall score. Roscoe et al. (2012) presents an automated feedback system that measures attributes of the student response and provides specific feedback if certain thresholds are met (e.g., \"use larger words\" when the mean syllables per word is too low). In Foltz et al. (2000) an AES system is shown that uses Latent Semantic Analysis (LSA) to measure similarities between student sentences and reference sentences. Each required topic has a set of 1-3 reference sentences, and if no sentence in the student essay is similar to any reference sentences for that topic, feedback encouraging the student to more fully describe the topic is presented. Summary Street R provides students with content feedback during the summarization task, and specifically uses a reference document with LSA for semantic comparison (Steinhart, 2001; Franzke et al., 2005 ).", |
| "cite_spans": [ |
| { |
| "start": 93, |
| "end": 109, |
| "text": "Burstein, , 2013", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 229, |
| "end": 249, |
| "text": "Roscoe et al. (2012)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 473, |
| "end": 492, |
| "text": "Foltz et al. (2000)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 1028, |
| "end": 1045, |
| "text": "(Steinhart, 2001;", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 1046, |
| "end": 1066, |
| "text": "Franzke et al., 2005", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automated Essay Scoring and Feedback", |
| "sec_num": "2" |
| }, |
| { |
| "text": "There has been effort toward providing students with localized feedback as well. presents a system that uses an ensemble of supervised machine learning models to locate and provide feedback on discourse components such as thesis statements. Similarly, Chukharev-Hudilainen and Saricaoglu (2016) presents a system that provides feedback on discourse structure in essays written by English language learners.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automated Essay Scoring and Feedback", |
| "sec_num": "2" |
| }, |
| { |
| "text": "A major drawback of these more localized feedback systems is the requirement that they be trained on annotation data, which is expensive to gather. Our work, which removes this constraint, is inspired by approaches that determine the contribution of individual sentences to the overall essay score. One such approach is described in Dong et al. (2017) , which presents a neural network that generates an attention vector over the sentences in a response. This attention vector directly relates to the importance of each individual sentence in the computation of the final predicted score. Woods et al. (2017) attempts to localize feedback based purely on the output of a holistic AES model. Specifically, they train an ordinal logistic regression model on a feature space consisting of character, word, and part-of-speech n-grams. They show that this model performs well on the AES task. They then propose a method for determining the contribution of each sentence to the overall score by measuring how much more likely a lower (or higher) score would be if that sentence was re- moved. They then use the Mahalanobis distance to compute how much that sentence's contribution differs from a known distribution of sentence contributions. Finally, they present feedback to the student, localized to sentences that were either noticeably beneficial or detrimental to the overall essay.", |
| "cite_spans": [ |
| { |
| "start": 333, |
| "end": 351, |
| "text": "Dong et al. (2017)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 589, |
| "end": 608, |
| "text": "Woods et al. (2017)", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automated Essay Scoring and Feedback", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We are interested in almost exactly the same task as Woods et al. (2017) -the only difference is that we aim to predict the locations humans would annotate, while their goal was to evaluate the effectiveness of their localized feedback. Specifically, we frame annotation prediction as a task with a set of essays and a set of labels, such that each sentence in each essay has a binary label indicating whether or not the specified topic was covered in that sentence. The goal is to develop a model that can predict these binary labels given the essays.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automated Essay Scoring and Feedback", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Latent Dirichlet Allocation (LDA) is an unsupervised method for automatically identifying topics in a document (Blei et al., 2003) , and is related to our goal of identifying sentences that received human annotations. This requires an assumption that the human annotators identified sentences that could match a specific topic learned by LDA. While there is some work on using LDA to aid in annotation (Camelin et al., 2011) , we are unaware of any attempts to extend it to the educational writing domain. Our approach differs from LDA in that we use supervised techniques whose predictions can be transferred to the annotation domain, rather than approaching the problem as a wholly unsupervised task. Additionally, we are classifying sentences by topics rather than explicitly creating word topic models for the topics.", |
| "cite_spans": [ |
| { |
| "start": 111, |
| "end": 130, |
| "text": "(Blei et al., 2003)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 402, |
| "end": 424, |
| "text": "(Camelin et al., 2011)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automated Essay Scoring and Feedback", |
| "sec_num": "2" |
| }, |
| { |
| "text": "If one views student essays as summaries (e.g., of the section of the textbook that the writing prompt corresponds to), then summarization evaluation approaches could be applicable. In particular, the PEAK algorithm (Yang et al., 2016) builds a hypergraph of subject-predicate-object triples, and then salient nodes in that graph are identified. These salient nodes are then collected into summary content units (SCUs), which can be used to score summaries. In our case, these SCUs would correspond to recurring topics in the student essays. One possible application of PEAK to our annotation prediction problem would be to run PEAK on a collection of high-scoring student essays. Similarity to the identified SCUs could then be used as a weak signal of the presence of a human annotation for a given sentence. Our approach differs from this application of PEAK in that we not only utilize similarity to sentences from high-scoring essays, but also use sentences from low-scoring essays as negative examples for a given topic.", |
| "cite_spans": [ |
| { |
| "start": 216, |
| "end": 235, |
| "text": "(Yang et al., 2016)", |
| "ref_id": "BIBREF34" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automated Essay Scoring and Feedback", |
| "sec_num": "2" |
| }, |
| { |
| "text": "To accomplish our goal of predicting annotations without having access to annotation data, we approach AES as a multiple instance learning regression problem. Multiple instance learning is a supervised learning paradigm in which the goal is to label bags of items, where the number of items in a bag can vary. The items in a bag are also referred to as instances. MIL is a well-studied area of machine learning, with a broad literature into its applications both in NLP (e.g., Bunescu and Mooney (2007) ) and in general settings (e.g., Diet-terich et al. (1997) ). The description provided here is based on Carbonneau et al. (2016) .", |
| "cite_spans": [ |
| { |
| "start": 477, |
| "end": 502, |
| "text": "Bunescu and Mooney (2007)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 536, |
| "end": 561, |
| "text": "Diet-terich et al. (1997)", |
| "ref_id": null |
| }, |
| { |
| "start": 607, |
| "end": 631, |
| "text": "Carbonneau et al. (2016)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multiple Instance Learning", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The standard description of MIL assumes that the goal is a binary classification. Intuitively, each bag has a known binary label, and we can think of the instances in a bag as having unknown binary labels. We then assume that the bag label is some aggregation of the unknown instance labels. We first describe MIL in these terms, and then extend those ideas to regression.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multiple Instance Learning", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Formally, let X denote our collection of training data, and let i denote an index over bags, such that each X i \u2208 X is of the form", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multiple Instance Learning", |
| "sec_num": "3" |
| }, |
| { |
| "text": "X i = {x i,1 , x i,2 , . . . , x i,m }.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multiple Instance Learning", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Note that m can differ among the elements of X, that is, the cardinalities of two elements X i , X j \u2208 X need not be equal. Let Y denote our training labels, such that each X i has a corresponding Y i \u2208 {0, 1}. We assume that there is a latent label for each instance x i,j , denoted by y i,j . Note that, in our specific application, x i,j corresponds to the j-th sentence of the i-th document in our corpus. The standard assumption in MIL asserts that", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multiple Instance Learning", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Y i = 0 if \u2200x i,j \u2208 X i , y i,j = 0 1 if \u2203x i,j \u2208 X i , y i,j = 1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multiple Instance Learning", |
| "sec_num": "3" |
| }, |
| { |
| "text": "That is, the standard assumption holds that a bag is positive if any of its constituent instances are positive. Another way of framing this assumption is that a single instance is responsible for an entire bag being positive. In contrast, the collective assumption holds that Y i is determined by some aggregation function over all of the instances in a bag. Thus, under the collective assumption, a bag's label is dependent upon more than one and possibly all of the instances in that bag.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multiple Instance Learning", |
| "sec_num": "3" |
| }, |
| { |
| "text": "AES is usually approached as a regression task, so these notions must be extended to regression. We adapt the standard assumption, that a single instance determines the bag label, by using a function that selects a single instance value from the bag. In this work, we use the maximum instance label. We adapt the collective assumption, that all instance labels contribute to the bag label, by using a function that aggregates across all instance labels. In this work, we use the mean instance label.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multiple Instance Learning", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The application of MIL to natural language processing tasks is quite common. Wang et al. (2016) trains a convolutional neural network to aggregate predictions across sentences in order to predict discussion of events in written articles. By framing this task as a MIL problem, not only can they learn to predict the types of events articles pertain to, they can also predict which sentences specifically discuss those events. A variety of similar approaches that assign values to sentences and then use aggregation to create document scores have been used for sentiment analysis (Kotzias et al., 2015; Pappas and Popescu-Belis, 2017; Angelidis and Lapata, 2018; Lutz et al., 2019) .", |
| "cite_spans": [ |
| { |
| "start": 77, |
| "end": 95, |
| "text": "Wang et al. (2016)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 579, |
| "end": 601, |
| "text": "(Kotzias et al., 2015;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 602, |
| "end": 633, |
| "text": "Pappas and Popescu-Belis, 2017;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 634, |
| "end": 661, |
| "text": "Angelidis and Lapata, 2018;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 662, |
| "end": 680, |
| "text": "Lutz et al., 2019)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multiple Instance Learning", |
| "sec_num": "3" |
| }, |
| { |
| "text": "To the best of our knowledge, applications of MIL in educational domains are rare, and we are not aware of any attempts to explicitly approach AES as a MIL task. The educational MIL work that we are aware of uses MIL to determine overall student performance given their trajectory over the duration of a course (Zafra et al., 2011) .", |
| "cite_spans": [ |
| { |
| "start": 311, |
| "end": 331, |
| "text": "(Zafra et al., 2011)", |
| "ref_id": "BIBREF35" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multiple Instance Learning", |
| "sec_num": "3" |
| }, |
| { |
| "text": "By framing AES as a MIL problem, the goal becomes predicting, for each sentence, the score for that sentence, and then aggregating those sentencelevel predictions to create a document-level prediction. This goal requires determining both how to predict these sentence-level scores, and how to aggregate them into document-level scores. Note that we perform this task independently for each topic t 1 , . . . , t n , but this discussion is limited to a single topic for clarity. We define the AES task as follows. Assume we are given a collection of student essays D and corresponding scores y. We assume these scores are numeric and lie in a range defined by the rubric -we use integers, but continuous values could also work. For example, if the rubric for a concept defined the possible scores as Omitted/Incorrect, Partially Correct, and Correct, the corresponding entries in y could be drawn from {0, 1, 2}. The AES task is to predict y given D.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automated Essay Scoring with Multiple Instance Learning", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The intuition for why MIL is appropriate for AES is that, for many kinds of topics, the content of a single sentence is sufficient to determine a score. For example, consider a psychology writing prompt that requires students to include the definition of a specific kind of therapy. If an essay includes a sentence that correctly defines that type of therapy, then the essay as a whole will receive a high score for that topic.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automated Essay Scoring with Multiple Instance Learning", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We approach the sentence-level scoring task using k-Nearest Neighbors (kNN) (Cover and Hart, 1967) . Denote the class label of a training example a as y a . For each document in our training corpus, we project each sentence into a semantic vector space, generating a corresponding vector that we denote as x. We assign to x the score of its parent document. We then train a kNN model on all of the sentences in the training corpus. We use the Euclidean distance as the metric for our nearest neighbor computations.", |
| "cite_spans": [ |
| { |
| "start": 76, |
| "end": 98, |
| "text": "(Cover and Hart, 1967)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automated Essay Scoring with Multiple Instance Learning", |
| "sec_num": "4" |
| }, |
| { |
| "text": "To predict the score of a new document using this model, we first split the document into sentences, project those sentences into our vector space, and use the kNN model to predict the score of each sentence. We define this sentence-level scoring function \u03c6 as", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automated Essay Scoring with Multiple Instance Learning", |
| "sec_num": "4" |
| }, |
| { |
| "text": "\u03c6(x) = 1 k a\u2208knn(x) y a", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automated Essay Scoring with Multiple Instance Learning", |
| "sec_num": "4" |
| }, |
| { |
| "text": "where knn(x) denotes the set of k nearest neighbors of x. We aggregate these sentence-level scores through a document-level scoring function \u03b8:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automated Essay Scoring with Multiple Instance Learning", |
| "sec_num": "4" |
| }, |
| { |
| "text": "\u03b8(X i ) = agg x i,j \u2208X i (\u03c6(x i,j ))", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automated Essay Scoring with Multiple Instance Learning", |
| "sec_num": "4" |
| }, |
| { |
| "text": "where agg corresponds to either the maximum or the mean -that is, agg determines whether we are making the standard or collective assumption. We consider three semantic vector spaces. We define our vocabulary V as the set of all words appearing in the training sentences. The first vector space is a tf-idf space, in which each sentence is projected into R |V | and each dimension in that vector corresponds to the term frequency of the corresponding vocabulary term multiplied by the inverse of the number of documents that contained that term.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automated Essay Scoring with Multiple Instance Learning", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We also consider a pretrained latent semantic analysis space. This space is constructed by using the singular value decomposition of the tf-idf matrix of a pretraining corpus to create a more compact representation of that tf-idf matrix (Landauer et al., 1998) .", |
| "cite_spans": [ |
| { |
| "start": 237, |
| "end": 260, |
| "text": "(Landauer et al., 1998)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automated Essay Scoring with Multiple Instance Learning", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Finally, we consider embedding our sentences using SBERT (Reimers and Gurevych, 2019) . SBERT is a version of BERT (Devlin et al., 2019) that has been fine-tuned on the SNLI (Bowman et al., 2015) and Multi-Genre NLI (Williams et al., 2018) tasks. These tasks involves predicting how sentences relate to one another. Critically, this means that the SBERT network has been specifically fine-tuned to embed individual sentences into a common space.", |
| "cite_spans": [ |
| { |
| "start": 57, |
| "end": 85, |
| "text": "(Reimers and Gurevych, 2019)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 115, |
| "end": 136, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 216, |
| "end": 239, |
| "text": "(Williams et al., 2018)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automated Essay Scoring with Multiple Instance Learning", |
| "sec_num": "4" |
| }, |
| { |
| "text": "While this kNN-MIL model is ultimately trained to predict document-level scores for essays, as a side effect, it also generates a score prediction for each sentence. The central idea is that we can directly use these sentence-level scores as weak signals of the presence of annotation spans in the sentences.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Weakly Supervised Localization", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Concretely, given our trained kNN-MIL model and an essay X i , we predict the presence of annotations as follows. Assume that the minimum and maximum scores allowed by the rubric for the given topic are S min and S max , respectively. We leverage the sentence-level scoring function \u03c6 to compute an annotation prediction function \u03b1:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Weakly Supervised Localization", |
| "sec_num": "5" |
| }, |
| { |
| "text": "\u03b1(x i,j ) = \u03c6(x i,j ) \u2212 S min S max \u2212 S min", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Weakly Supervised Localization", |
| "sec_num": "5" |
| }, |
| { |
| "text": "That is, our annotation prediction function \u03b1 is a rescaling of \u03c6 such that it lies in [0, 1], allowing us to interpret it as a normalized prediction of a sentence having an annotation. As our goal is to predict annotation spans without explicit annotation data, we also consider a modification of this process. Rather than training our kNN-MIL model on a corpus of scored student essays, we could instead use a set of manually curated reference sentences to train the model. We consider two sources of reference sentences.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Weakly Supervised Localization", |
| "sec_num": "5" |
| }, |
| { |
| "text": "First, we consider reference sentences pulled from the corresponding rubric, labeled by the topic they belong to. Rubrics often have descriptions of ideal answers and their key points, so generating such a set is low-cost. However, sentences from rubric descriptions may not discuss a topic in the same way that a student would, or they may fail to anticipate specific correct student answers.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Weakly Supervised Localization", |
| "sec_num": "5" |
| }, |
| { |
| "text": "For these reasons, we also consider selecting reference sentences by manually picking sentences from the training essays. We consider all training essays that received the highest score on a topic as candidates and choose one to a few sentences that clearly address the topic. We specifically look for exemplars making different points and written in different ways. These identified sentences are manually labeled as belonging to the given topic, and each one is used as a different reference sentence when training our kNN-MIL model. Typically, just a few exemplars per topic is sufficient (Foltz et al., 2000) .", |
| "cite_spans": [ |
| { |
| "start": 592, |
| "end": 612, |
| "text": "(Foltz et al., 2000)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Weakly Supervised Localization", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Whether we collect examples of formal wording from the rubric or informal wording from student answers, or both, we must then label the reference sentences for use in our kNN-MIL model. For a given topic, the references drawn from other topics provide negative examples of it. To convert these manual binary topic labels into the integer space that we use for the AES task, we assign to each reference sentence the maximum score for the topic(s) it was labeled as belonging to, and the minimum score to it for all other topics.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Weakly Supervised Localization", |
| "sec_num": "5" |
| }, |
| { |
| "text": "The key benefit of our approach is that it never requires access to annotation training data. Instead, given a collection of student essays for a new prompt, training a kNN-MIL model for that prompt requires one of a few sources of data. If we have human-provided document-level scores for the topics we are interested in, we can train a kNN-MIL model on those labeled documents. Otherwise, if the rubric contains detailed enough reference sentences and descriptions for the various topics, we can train a kNN-MIL model using reference sentences collected from the rubric. And finally, we can have a human expert collect examples of the topics of interest from the essays, and then train a kNN-MIL model using those examples as reference sentences.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Weakly Supervised Localization", |
| "sec_num": "5" |
| }, |
| { |
| "text": "To evaluate the performance of kNN-MIL, we need student essays that have both document-level scores and annotation spans. To the best of our knowledge, there is no publicly available dataset that contains both. Thus, we make use of an existing Pearson proprietary corpus developed to explore fine-grained content assessment for formative feedback. This corpus consists of student responses to four universitylevel psychology writing prompts. While the essays were originally written and scored against holistic writing traits, a subsequent annotation effort factored the content trait into multiple topics that represent core ideas or assertions an instructor would expect a student to address within the essay. For example, the topic Comparing Egocentrism from a prompt about Piaget's stages of development has the following reference answer: plete, Partial, Incorrect or Omitted. Additionally, they were asked to mark spans in the essay pertaining to the topic -these could be as short as a few words or as long as multiple sentences. Two psychology subject matter experts (SMEs) performed the rating and span selection tasks. Ideally, rating and span annotations would have also been adjudicated by a third SME. However, due to time and cost constraints, we lack adjudicated labels for three of the four prompts. For this reason, we ran our experiments on both annotators separately. As our techniques work at a sentence-level, but the human annotations can be shorter or longer than a single sentence, we frame the annotation prediction task as the task of predicting, for a given sentence, whether an annotation overlapped with that sentence. We show the distribution of interannotator agreements for the topics in the four prompts in the left panel of Figure 2 , calculated as the correlation between these sentence-level annotation labels. \nThe annotators achieved reasonable reliability except on the Sensory prompt, where the median correlation was below 0.5, and one topic in the Piaget prompt, where the annotators had a correlation near 0.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 1758, |
| "end": 1766, |
| "text": "Figure 2", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "6" |
| }, |
| { |
| "text": "The features of these four prompts are shown in Table 1 . Essays had 5-8 topics and covered areas such as the stages of sleep; the construction of a potential experimental study on aromatherapy; Piaget's stages of cognitive development; and graduated versus flooding approaches to exposure therapy for a hypothetical case of agoraphobia. Table 2 shows how many sentences were available for training the kNN-MIL models for each prompt.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 48, |
| "end": 55, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Our approach assumes that the topic scores are numeric. We convert the scores in this dataset by mapping both Omitted and Incorrect to 0, Partial to 1, and Complete to 2. As our approach uses these topic scores to generate annotation predictions, its ability to predict different annotations for different topics depends on the topic scores not being highly correlated. The right panel of Figure 2 shows the distribution of inter-topic correlations for each prompt. While there is considerable variation between the prompts, we do see that, except for one topic pair on the Piaget prompt, all intertopic correlations are less than 0.8, and the median correlations are all below 0.5.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 389, |
| "end": 398, |
| "text": "Figure 2", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Our goal is to determine how well the kNN-MIL approaches perform on the annotation prediction task. We also want to verify that our approaches perform reasonably well on the essay scoring taskwhile we are not directly interested in essay scoring, if our approaches are incapable of predicting essay scores, that would indicate that the underlying assumptions of our kNN-MIL approaches are likely invalid. For each prompt, we construct 30 randomized train/test splits, holding out 20% of the data as the test set. We then train and evaluate our models on those splits, recording two key values: the corre-lation of the model's document-level scores to the human scorer, and the area under the ROC curve of the model's sentence-level annotation predictions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "7" |
| }, |
| { |
| "text": "We compare results between three categories of models. The first is the kNN-MIL model, trained on the training set. We refer to this model as the Base kNN-MIL model. The second is the kNN-MIL model trained on a manually curated reference set, which we refer to as the Manual kNN-MIL model. Finally, we compare to the ordinal logistic regression-based approach presented in Woods et al. 2017, which we will refer to as the OLR model. Additionally, as a baseline for comparison on the annotation prediction task, we train a sentence-level kNN model directly on the human annotation data, which we refer to as the Annotation kNN model. We consider the Annotation kNN model to provide a rough upper bound on how well the kNN-MIL approaches can perform. Finally, for our kNN-MIL models, we investigate how varying k and the vector space impacts model performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "7" |
| }, |
| { |
| "text": "We use the all-threshold ordinal logistic regression model from mord (Pedregosa-Izquierdo, 2015) and the part of speech tagger from spaCy (Honnibal and Montani, 2017) in our implementation of the OLR model. The Mahalanobis distance computation for this approach requires a known distribution of score changes, for this we use the distribution of score changes of the training set.", |
| "cite_spans": [ |
| { |
| "start": 69, |
| "end": 96, |
| "text": "(Pedregosa-Izquierdo, 2015)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 138, |
| "end": 166, |
| "text": "(Honnibal and Montani, 2017)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "7" |
| }, |
| { |
| "text": "We use the kNN and tf-idf implementations from scikit-learn (Pedregosa et al., 2011) and the LSA implementation from gensim (\u0158eh\u016f\u0159ek and Sojka, 2010) . Our pretrained LSA space is 300 dimensional, and is trained on a collection of 45,108 English documents sampled from grade 3-12 readings and augmented with material from psychology textbooks. (Landauer et al., 1998) . After filtering very common and uncommon words, this space includes 37,013 terms, covering 85% of the terms appearing in the training data. ", |
| "cite_spans": [ |
| { |
| "start": 60, |
| "end": 84, |
| "text": "(Pedregosa et al., 2011)", |
| "ref_id": null |
| }, |
| { |
| "start": 124, |
| "end": 149, |
| "text": "(\u0158eh\u016f\u0159ek and Sojka, 2010)", |
| "ref_id": null |
| }, |
| { |
| "start": 344, |
| "end": 367, |
| "text": "(Landauer et al., 1998)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "7" |
| }, |
| { |
| "text": "We present the average annotation prediction performance of the kNN-MIL models for different values of k in Figure 3 . While all approaches achieve AUCs above 0.5, the LSA-based space performs relatively poorly. The tf-idf space performs well, especially for the Base kNN-MIL model. In the tf-idf space, Base kNN-MIL performance peaks at k = 400. For the Manual kNN-MIL models, best performance occurs with the combined reference set using the tf-idf or SBERT spaces, around k = 10. Performance for Manual kNN-MIL with only rubric references or student references peaks and declines sooner than for combined due to the set of possible neighbors being smaller. Note that the substantial difference in k between Base kNN-MIL and Manual kNN-MIL is due to the fact that we have orders of magnitude fewer manual reference sentences than training set sentences.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 108, |
| "end": 116, |
| "text": "Figure 3", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "8" |
| }, |
| { |
| "text": "In light of these results, for clarity in the rest of this discussion, we focus on k = 400 for Base kNN-MIL, k = 10 and the combined reference set for Manual kNN-MIL, and exclude the LSA space.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "8" |
| }, |
| { |
| "text": "To determine how annotation prediction differs across model types, we show the average overall AUC of all models in Table 3 . In this table, we see that our best performance is achieved when we train a kNN model on actual annotation data. In contrast, the OLR model performs relatively poorly, suggesting that its success at predicting sentences that require some sort of feedback does not directly translate into an ability to predict locations of annotations.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 116, |
| "end": 123, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "8" |
| }, |
| { |
| "text": "Between the different kNN-MIL approaches, Base kNN-MIL using a tf-idf vector space performs best on three of the four prompts, and regardless of vector space, Base kNN-MIL performs as well or better than Manual kNN-MIL on those same three prompts. On the remaining prompt, Exposure Therapy, Manual kNN-MIL with SBERT performs best, but the differences between the various kNN-MIL approaches are relatively small on this prompt.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "8" |
| }, |
| { |
| "text": "These annotation predictions results show that the kNN-MIL approach performs well despite never being explicitly trained on the annotation prediction task. While the Base kNN-MIL approach is overall better than the Manual kNN-MIL approach, it also requires a large amount of scored data for training. Which kNN-MIL approach is best for a particular situation thus depends on if the additional performance gain of Base kNN-MIL is worth the added cost of obtaining essay scoring data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "8" |
| }, |
| { |
| "text": "Finally, we show performance on the essay scoring task in Table 4 . On this task, the OLR model and the Base kNN-MIL model with a tf-idf space perform the best, and the Manual kNN-MIL models perform the worst. We had predicted that the standard MIL assumption would perform well for AES, and our results show that this is true -for both Base and Manual kNN-MIL, using the maximum sentence topic score in an answer outperforms using the mean sentence topic score.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 58, |
| "end": 65, |
| "text": "Table 4", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "8" |
| }, |
| { |
| "text": "The Base kNN-MIL model can perform relatively well at both the document scoring task and the annotation prediction task. This suggests that it could be used as an explainable AES model, as the annotation predictions are directly tied to the document-level scores it provides. In this quite different application, the localization would be used to explain the sentences contributing to the final score, rather than to provide context for formative feedback.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "8" |
| }, |
| { |
| "text": "We have presented a novel approach of using MIL to train annotation prediction models without access to annotation training data. This technique performs well and can allow for automated localization without expensive data annotation. It also performs relatively well on the document-level scoring task, suggesting that its sentence-level score predictions could be used as part of an explainable model for AES.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions and Future Work", |
| "sec_num": "9" |
| }, |
| { |
| "text": "Given that our kNN-MIL approach operates at the sentence level, it is unlikely to correctly locate annotations that exist across multiple sentences. Adapting our method to better incorporate information across sentences (e.g., by incorporating co- reference resolution) could help improve its overall performance. Additionally, as the Base kNN-MIL approach uses topics as negative examples for each other, we expect that it would not work well in situations where the inter-topic score correlations were high. We expect the Manual kNN-MIL approach to be less sensitive to this issue. Determining other ways to include negative examples would allow the Base kNN-MIL approach to be applied to prompts whose topics were highly correlated. In our current domain, psychology, and in the context of low-stakes formative feedback, incorrect answers are uncommon compared to omitted or partial answers. In contrast, for domains that require chained reasoning over more complex mental models, such as accounting, cell biology, or computer science, we expect the ability to correctly detect misconceptions and errors to be far more important. In general, future work is required to determine how well our approach will work in other domains, and which domains it is best suited to.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions and Future Work", |
| "sec_num": "9" |
| }, |
| { |
| "text": "Determining where topics are discussed is only one step in the full formative feedback process.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions and Future Work", |
| "sec_num": "9" |
| }, |
| { |
| "text": "More work is required to determine the path from holistic scoring and topic localization to the most helpful kinds of feedback for a student. In particular, we need to consider different kinds of pedagogical feedback and how such feedback could be individualized. Additionally, we could provide not just text but also video, peer interaction, worked examples, and other approaches from the full panoply of potential pedagogical interventions. Finally, we need to decide what actions will help the student the most, which relies on our pedagogical theory of how to help a student achieve their current instructional objectives.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions and Future Work", |
| "sec_num": "9" |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We would like to thank Alok Baikadi, Julio Bradford, Jill Budden, Amy Burkhardt, Dave Farnham, Andrew Gorman and Jorge Roccatagliata for their efforts in collecting the annotated dataset used in this work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Multiple instance learning networks for Fine-Grained sentiment analysis", |
| "authors": [ |
| { |
| "first": "Stefanos", |
| "middle": [], |
| "last": "Angelidis", |
| "suffix": "" |
| }, |
| { |
| "first": "Mirella", |
| "middle": [], |
| "last": "Lapata", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "6", |
| "issue": "", |
| "pages": "17--31", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stefanos Angelidis and Mirella Lapata. 2018. Multi- ple instance learning networks for Fine-Grained sen- timent analysis. Transactions of the Association for Computational Linguistics, 6:17-31.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Latent dirichlet allocation", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "David", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Blei", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Andrew", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael I Jordan", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "3", |
| "issue": "", |
| "pages": "993--1022", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David M Blei, Andrew Y Ng, and Michael I Jordan. 2003. Latent dirichlet allocation. Journal of Ma- chine Learning Research., 3(Jan):993-1022.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "A large annotated corpus for learning natural language inference", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Samuel", |
| "suffix": "" |
| }, |
| { |
| "first": "Gabor", |
| "middle": [], |
| "last": "Bowman", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Angeli", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Potts", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Samuel R. Bowman, Gabor Angeli, Christopher Potts, and Christopher D. Manning. 2015. A large anno- tated corpus for learning natural language inference. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing (EMNLP). Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Learning to extract relations from the web using minimal supervision", |
| "authors": [ |
| { |
| "first": "Razvan", |
| "middle": [], |
| "last": "Bunescu", |
| "suffix": "" |
| }, |
| { |
| "first": "Raymond", |
| "middle": [], |
| "last": "Mooney", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the 45th Annual Meeting of the Association of Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "576--583", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Razvan Bunescu and Raymond Mooney. 2007. Learn- ing to extract relations from the web using mini- mal supervision. In Proceedings of the 45th An- nual Meeting of the Association of Computational Linguistics, pages 576-583.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Finding the write stuff: Automatic identification of discourse structure in student essays. Intelligent Systems", |
| "authors": [ |
| { |
| "first": "Jill", |
| "middle": [], |
| "last": "Burstein", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Marcu", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Knight", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "", |
| "volume": "18", |
| "issue": "", |
| "pages": "32--39", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/MIS.2003.1179191" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jill Burstein, Daniel Marcu, and Kevin Knight. 2003. Finding the write stuff: Automatic identification of discourse structure in student essays. Intelligent Sys- tems, IEEE, 18:32 -39.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Unsupervised concept annotation using latent dirichlet allocation and segmental methods", |
| "authors": [ |
| { |
| "first": "Nathalie", |
| "middle": [], |
| "last": "Camelin", |
| "suffix": "" |
| }, |
| { |
| "first": "Boris", |
| "middle": [], |
| "last": "Detienne", |
| "suffix": "" |
| }, |
| { |
| "first": "St\u00e9phane", |
| "middle": [], |
| "last": "Huet", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the First workshop on Unsupervised Learning in NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "72--81", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nathalie Camelin, Boris Detienne, St\u00e9phane Huet, Do- minique Quadri, and Fabrice Lef\u00e8vre. 2011. Un- supervised concept annotation using latent dirichlet allocation and segmental methods. In Proceedings of the First workshop on Unsupervised Learning in NLP, pages 72-81.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Multiple instance learning: A survey of problem characteristics and applications", |
| "authors": [ |
| { |
| "first": "Marc-Andr\u00e9", |
| "middle": [], |
| "last": "Carbonneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Veronika", |
| "middle": [], |
| "last": "Cheplygina", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Granger", |
| "suffix": "" |
| }, |
| { |
| "first": "Ghyslain", |
| "middle": [], |
| "last": "Gagnon", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Pattern Recognition", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1016/j.patcog.2017.10.009" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marc-Andr\u00e9 Carbonneau, Veronika Cheplygina, Eric Granger, and Ghyslain Gagnon. 2016. Multiple in- stance learning: A survey of problem characteristics and applications. Pattern Recognition.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Causal discourse analyzer: improving automated feedback on academic ESL writing", |
| "authors": [ |
| { |
| "first": "Evgeny", |
| "middle": [], |
| "last": "Chukharev-Hudilainen", |
| "suffix": "" |
| }, |
| { |
| "first": "Aysel", |
| "middle": [], |
| "last": "Saricaoglu", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Computer Assisted Language Learning", |
| "volume": "29", |
| "issue": "3", |
| "pages": "494--516", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Evgeny Chukharev-Hudilainen and Aysel Saricaoglu. 2016. Causal discourse analyzer: improving auto- mated feedback on academic ESL writing. Com- puter Assisted Language Learning, 29(3):494-516.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Nearest neighbor pattern classification", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Cover", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Hart", |
| "suffix": "" |
| } |
| ], |
| "year": 1967, |
| "venue": "IEEE Transactions on Information Theory", |
| "volume": "13", |
| "issue": "1", |
| "pages": "21--27", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "T. Cover and P. Hart. 1967. Nearest neighbor pattern classification. IEEE Transactions on Information Theory, 13(1):21-27.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "BERT: pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL-HLT 2019", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/n19-1423" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL-HLT 2019, Minneapolis, MN, USA, June 2-7, 2019, Volume 1 (Long and Short Pa- pers), pages 4171-4186. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Solving the multiple instance problem with axis-parallel rectangles", |
| "authors": [ |
| { |
| "first": "Thomas", |
| "middle": [ |
| "G" |
| ], |
| "last": "Dietterich", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [ |
| "H" |
| ], |
| "last": "Lathrop", |
| "suffix": "" |
| }, |
| { |
| "first": "Tom\u00e1s", |
| "middle": [], |
| "last": "Lozano-P\u00e9rez", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Artificial Intelligence", |
| "volume": "89", |
| "issue": "1-2", |
| "pages": "31--71", |
| "other_ids": { |
| "DOI": [ |
| "10.1016/S0004-3702(96)00034-3" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas G. Dietterich, Richard H. Lathrop, and Tom\u00e1s Lozano-P\u00e9rez. 1997. Solving the multiple instance problem with axis-parallel rectangles. Artificial In- telligence, 89(1-2):31-71.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Attentionbased recurrent convolutional neural network for automatic essay scoring", |
| "authors": [ |
| { |
| "first": "Fei", |
| "middle": [], |
| "last": "Dong", |
| "suffix": "" |
| }, |
| { |
| "first": "Yue", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jie", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 21st Conference on Computational Natural Language Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "153--162", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fei Dong, Yue Zhang, and Jie Yang. 2017. Attention- based recurrent convolutional neural network for au- tomatic essay scoring. In Proceedings of the 21st Conference on Computational Natural Language Learning (CoNLL 2017), pages 153-162.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Supporting Content-Based feedback in On-Line writing evaluation with LSA", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Peter", |
| "suffix": "" |
| }, |
| { |
| "first": "Sara", |
| "middle": [], |
| "last": "Foltz", |
| "suffix": "" |
| }, |
| { |
| "first": "Scott A", |
| "middle": [], |
| "last": "Gilliam", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Kendall", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "Interactive Learning Environments", |
| "volume": "8", |
| "issue": "2", |
| "pages": "111--127", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter W Foltz, Sara Gilliam, and Scott A Kendall. 2000. Supporting Content-Based feedback in On- Line writing evaluation with LSA. Interactive Learning Environments, 8(2):111-127.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Summary street R : Computer support for comprehension and writing", |
| "authors": [ |
| { |
| "first": "Marita", |
| "middle": [], |
| "last": "Franzke", |
| "suffix": "" |
| }, |
| { |
| "first": "Eileen", |
| "middle": [], |
| "last": "Kintsch", |
| "suffix": "" |
| }, |
| { |
| "first": "Donna", |
| "middle": [], |
| "last": "Caccamise", |
| "suffix": "" |
| }, |
| { |
| "first": "Nina", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| }, |
| { |
| "first": "Scott", |
| "middle": [], |
| "last": "Dooley", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Journal of Educational Computing Research", |
| "volume": "33", |
| "issue": "", |
| "pages": "53--80", |
| "other_ids": { |
| "DOI": [ |
| "10.2190/DH8F-QJWM-J457-FQVB" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marita Franzke, Eileen Kintsch, Donna Caccamise, Nina Johnson, and Scott Dooley. 2005. Summary street R : Computer support for comprehension and writing. Journal of Educational Computing Re- search, 33:53-80.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Informing Writing: The Benefits of Formative Assessment. A Report from Carnegie Corporation of New York", |
| "authors": [ |
| { |
| "first": "Steve", |
| "middle": [], |
| "last": "Graham", |
| "suffix": "" |
| }, |
| { |
| "first": "Karen", |
| "middle": [], |
| "last": "Harris", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Hebert", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Steve Graham, Karen Harris, and Michael Hebert. 2011. Informing Writing: The Benefits of Forma- tive Assessment. A Report from Carnegie Corpora- tion of New York. Carnegie Corporation of New York.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
"title": "spaCy 2: Natural language understanding with Bloom embeddings, convolutional neural networks and incremental parsing",
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Honnibal", |
| "suffix": "" |
| }, |
| { |
| "first": "Ines", |
| "middle": [], |
| "last": "Montani", |
| "suffix": "" |
| } |
| ], |
"year": 2017,
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew Honnibal and Ines Montani. 2017. spaCy 2: Natural language understanding with Bloom embed- dings, convolutional neural networks and incremen- tal parsing. To appear.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "From group to individual labels using deep features", |
| "authors": [ |
| { |
| "first": "Dimitrios", |
| "middle": [], |
| "last": "Kotzias", |
| "suffix": "" |
| }, |
| { |
| "first": "Misha", |
| "middle": [], |
| "last": "Denil", |
| "suffix": "" |
| }, |
| { |
| "first": "Padhraic", |
| "middle": [], |
| "last": "Nando De Freitas", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Smyth", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 21th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining", |
| "volume": "", |
| "issue": "", |
| "pages": "597--606", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dimitrios Kotzias, Misha Denil, Nando de Freitas, and Padhraic Smyth. 2015. From group to individual la- bels using deep features. In Proceedings of the 21th ACM SIGKDD International Conference on Knowl- edge Discovery and Data Mining, pages 597-606. ACM.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "An introduction to latent semantic analysis", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Thomas", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [ |
| "W" |
| ], |
| "last": "Landauer", |
| "suffix": "" |
| }, |
| { |
| "first": "Darrell", |
| "middle": [], |
| "last": "Foltz", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Laham", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "Discourse processes", |
| "volume": "25", |
| "issue": "", |
| "pages": "259--284", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas K Landauer, Peter W. Foltz, and Darrell La- ham. 1998. An introduction to latent semantic anal- ysis. Discourse processes, 25(2-3):259-284.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Sentence-level sentiment analysis of financial news using distributed text representations and multi-instance learning", |
| "authors": [ |
| { |
| "first": "Bernhard", |
| "middle": [], |
| "last": "Lutz", |
| "suffix": "" |
| }, |
| { |
| "first": "Nicolas", |
| "middle": [], |
| "last": "Pr\u00f6llochs", |
| "suffix": "" |
| }, |
| { |
| "first": "Dirk", |
| "middle": [], |
| "last": "Neumann", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 52nd Hawaii International Conference on System Sciences", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.24251/HICSS.2019.137" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bernhard Lutz, Nicolas Pr\u00f6llochs, and Dirk Neumann. 2019. Sentence-level sentiment analysis of finan- cial news using distributed text representations and multi-instance learning. In Proceedings of the 52nd Hawaii International Conference on System Sci- ences.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
"title": "The imminence of grading essays by computer",
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Ellis", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Page", |
| "suffix": "" |
| } |
| ], |
| "year": 1966, |
"venue": "The Phi Delta Kappan",
| "volume": "47", |
| "issue": "", |
| "pages": "238--243", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ellis B Page. 1966. The imminence of grading essays by computer. The Phi Delta Kappan, 47(5):238- 243.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Explicit document modeling through weighted multiple-instance learning", |
| "authors": [ |
| { |
| "first": "Nikolaos", |
| "middle": [], |
| "last": "Pappas", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrei", |
| "middle": [], |
| "last": "Popescu-Belis", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
"venue": "Journal of Artificial Intelligence Research",
| "volume": "58", |
| "issue": "1", |
| "pages": "591--626", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nikolaos Pappas and Andrei Popescu-Belis. 2017. Explicit document modeling through weighted multiple-instance learning. Journal of Artificial In- telligenece Research., 58(1):591-626.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "The nature of feedback: How peer feedback features affect students' implementation rate and quality of revisions", |
| "authors": [ |
| { |
| "first": "Melissa", |
| "middle": [], |
| "last": "Patchan", |
| "suffix": "" |
| }, |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Schunn", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Correnti", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Journal of Educational Psychology", |
| "volume": "108", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1037/edu0000103" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Melissa Patchan, Christian Schunn, and Richard Cor- renti. 2016. The nature of feedback: How peer feed- back features affect students' implementation rate and quality of revisions. Journal of Educational Psy- chology, 108.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Scikit-learn: Machine Learning in Python", |
| "authors": [ |
| { |
| "first": "Matthieu", |
| "middle": [], |
| "last": "Perrot", |
| "suffix": "" |
| }, |
| { |
"first": "\u00c9douard",
"middle": [],
"last": "Duchesnay",
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "12", |
| "issue": "", |
| "pages": "2825--2830", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
"raw_text": "Matthieu Perrot, and \u00c9douard Duchesnay. 2011. Scikit-learn: Machine Learning in Python. Journal of Machine Learning Research, 12(Oct):2825-2830.",
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Feature extraction and supervised learning on fMRI : from practice to theory. Theses", |
| "authors": [ |
| { |
| "first": "Fabian", |
| "middle": [], |
| "last": "Pedregosa-Izquierdo", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fabian Pedregosa-Izquierdo. 2015. Feature extraction and supervised learning on fMRI : from practice to theory. Theses, Universit\u00e9 Pierre et Marie Curie - Paris VI.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Software Framework for Topic Modelling with Large Corpora", |
| "authors": [ |
| { |
| "first": "Petr", |
| "middle": [], |
| "last": "Radim\u0159eh\u016f\u0159ek", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Sojka", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the LREC 2010 Workshop on New Challenges for NLP Frameworks", |
| "volume": "", |
| "issue": "", |
| "pages": "45--50", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
"raw_text": "Radim \u0158eh\u016f\u0159ek and Petr Sojka. 2010. Software Frame- work for Topic Modelling with Large Corpora. In Proceedings of the LREC 2010 Workshop on New Challenges for NLP Frameworks, pages 45-50, Val- letta, Malta. ELRA.",
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
"title": "Sentence-BERT: Sentence embeddings using siamese BERT-networks",
| "authors": [ |
| { |
| "first": "Nils", |
| "middle": [], |
| "last": "Reimers", |
| "suffix": "" |
| }, |
| { |
| "first": "Iryna", |
| "middle": [], |
| "last": "Gurevych", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nils Reimers and Iryna Gurevych. 2019. Sentence- bert: Sentence embeddings using siamese bert- networks. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Developing pedagogically-guided threshold algorithms for intelligent automated essay feedback", |
| "authors": [ |
| { |
| "first": "Rod", |
| "middle": [], |
| "last": "Roscoe", |
| "suffix": "" |
| }, |
| { |
| "first": "Danica", |
| "middle": [], |
| "last": "Kugler", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Scott", |
| "suffix": "" |
| }, |
| { |
| "first": "Jennifer", |
| "middle": [ |
| "L" |
| ], |
| "last": "Crossley", |
| "suffix": "" |
| }, |
| { |
| "first": "Danielle", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mcnamara", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 25th International Florida Artificial Intelligence Research Society Conference, FLAIRS-25", |
| "volume": "", |
| "issue": "", |
| "pages": "466--471", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rod Roscoe, Danica Kugler, Scott A Crossley, Jen- nifer L Weston, and Danielle McNamara. 2012. De- veloping pedagogically-guided threshold algorithms for intelligent automated essay feedback. In Pro- ceedings of the 25th International Florida Artificial Intelligence Research Society Conference, FLAIRS- 25, pages 466-471.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Automated essay scoring: A cross-disciplinary perspective", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Mark", |
| "suffix": "" |
| }, |
| { |
| "first": "Jill", |
| "middle": [ |
| "C" |
| ], |
| "last": "Shermis", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Burstein", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mark D. Shermis and Jill C. Burstein, editors. 2003. Automated essay scoring: A cross-disciplinary per- spective. Lawrence Erlbaum Associates, Inc., Mah- way, NJ.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
"title": "Handbook of automated essay evaluation: Current applications and new directions",
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Mark", |
| "suffix": "" |
| }, |
| { |
| "first": "Jill", |
| "middle": [ |
| "C" |
| ], |
| "last": "Shermis", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Burstein", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mark D. Shermis and Jill C. Burstein, editors. 2013. Handbook of automated essay evaluation: Current applications and new directions. Routledge, New York.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
"title": "Summary street: An intelligent tutoring system for improving student writing through the use of latent semantic analysis",
| "authors": [ |
| { |
| "first": "David", |
| "middle": [ |
| "J" |
| ], |
| "last": "Steinhart", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David J. Steinhart. 2001. Summary street: An intelli- gent tutoring system for improving student writing through the use of latent semantic analysis. Doc- tor of philosophy (thesis), University of Colorado at Boulder.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "A multiple instance learning framework for identifying key sentences and detecting events", |
| "authors": [ |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yue", |
| "middle": [], |
| "last": "Ning", |
| "suffix": "" |
| }, |
| { |
| "first": "Huzefa", |
| "middle": [], |
| "last": "Rangwala", |
| "suffix": "" |
| }, |
| { |
| "first": "Naren", |
| "middle": [], |
| "last": "Ramakrishnan", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 25th ACM International on Conference on Information and Knowledge Management", |
| "volume": "", |
| "issue": "", |
| "pages": "509--518", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wei Wang, Yue Ning, Huzefa Rangwala, and Naren Ramakrishnan. 2016. A multiple instance learning framework for identifying key sentences and detect- ing events. In Proceedings of the 25th ACM Inter- national on Conference on Information and Knowl- edge Management, pages 509-518. ACM.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "A broad-coverage challenge corpus for sentence understanding through inference", |
| "authors": [ |
| { |
| "first": "Adina", |
| "middle": [], |
| "last": "Williams", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikita", |
| "middle": [], |
| "last": "Nangia", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel", |
| "middle": [], |
| "last": "Bowman", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "1112--1122", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N18-1101" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adina Williams, Nikita Nangia, and Samuel Bowman. 2018. A broad-coverage challenge corpus for sen- tence understanding through inference. In Proceed- ings of the 2018 Conference of the North American Chapter of the Association for Computational Lin- guistics: Human Language Technologies, Volume 1 (Long Papers), pages 1112-1122, New Orleans, Louisiana. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Formative essay feedback using predictive scoring models", |
| "authors": [ |
| { |
| "first": "Bronwyn", |
| "middle": [], |
| "last": "Woods", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Adamson", |
| "suffix": "" |
| }, |
| { |
| "first": "Shayne", |
| "middle": [], |
| "last": "Miel", |
| "suffix": "" |
| }, |
| { |
| "first": "Elijah", |
| "middle": [], |
| "last": "Mayfield", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 23rd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, KDD '17", |
| "volume": "", |
| "issue": "", |
| "pages": "2071--2080", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/3097983.3098160" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bronwyn Woods, David Adamson, Shayne Miel, and Elijah Mayfield. 2017. Formative essay feedback using predictive scoring models. In Proceedings of the 23rd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, KDD '17, page 2071-2080, New York, NY, USA. Association for Computing Machinery.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Peak: Pyramid evaluation via automated knowledge extraction", |
| "authors": [ |
| { |
| "first": "Qian", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Rebecca", |
| "middle": [ |
| "J" |
| ], |
| "last": "Passonneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Gerard", |
| "middle": [ |
| "De" |
| ], |
| "last": "Melo", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Thirtieth AAAI Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Qian Yang, Rebecca J Passonneau, and Gerard De Melo. 2016. Peak: Pyramid evaluation via au- tomated knowledge extraction. In Thirtieth AAAI Conference on Artificial Intelligence.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Multiple instance learning for classifying students in learning management systems", |
| "authors": [ |
| { |
| "first": "Amelia", |
| "middle": [], |
| "last": "Zafra", |
| "suffix": "" |
| }, |
| { |
| "first": "Crist\u00f3bal", |
| "middle": [], |
| "last": "Romero", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebasti\u00e1n", |
| "middle": [], |
| "last": "Ventura", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Expert Systems with Applications", |
| "volume": "38", |
| "issue": "12", |
| "pages": "15020--15031", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Amelia Zafra, Crist\u00f3bal Romero, and Sebasti\u00e1n Ven- tura. 2011. Multiple instance learning for clas- sifying students in learning management systems. Expert Systems with Applications, 38(12):15020- 15031.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "uris": null, |
| "text": "Screenshot from an annotation tool containing an example essay with colored text indicating humanprovided annotations (left), the color-coded annotation key (top right) and holistic scores (bottom right).", |
| "type_str": "figure" |
| }, |
| "FIGREF1": { |
| "num": null, |
| "uris": null, |
| "text": "child in the pre-operational stage is unable to see things from another person's point of view, whereas a child in the concrete operational stage can. Annotators were tasked with assigning an essaylevel rating for each topic with a judgment of Com-s ta g e s -o f-s le e p s e n s o ry p ia g e t-s ta g e s e x p o s u re -t h e ra p", |
| "type_str": "figure" |
| }, |
| "FIGREF2": { |
| "num": null, |
| "uris": null, |
| "text": "Box plots of inter-annotator correlations of the sentence-level annotation labels for each topic (left) and correlation between scores for all topic pairs (right).", |
| "type_str": "figure" |
| }, |
| "FIGREF3": { |
| "num": null, |
| "uris": null, |
| "text": "Annotation prediction performance of the kNN-MIL models as k is varied, averaged across all prompts, concepts, and annotators. Error bars omitted for clarity.", |
| "type_str": "figure" |
| }, |
| "TABREF1": { |
| "type_str": "table", |
| "num": null, |
| "content": "<table><tr><td>Prompt</td><td colspan=\"3\">Rubric Student Training</td></tr><tr><td>Sleep Stages</td><td>15</td><td>19</td><td>4741</td></tr><tr><td>Sensory Study</td><td>11</td><td>13</td><td>5362</td></tr><tr><td>Piaget Stages</td><td>26</td><td>22</td><td>6342</td></tr><tr><td colspan=\"2\">Exposure Therapy 20</td><td>48</td><td>5184</td></tr></table>", |
| "html": null, |
| "text": "Characteristics and summary statistics of prompts used in the experiments. The Annotator columns indicate, for a specific topic, the average percentage of sentences annotated with that topic." |
| }, |
| "TABREF2": { |
| "type_str": "table", |
| "num": null, |
| "content": "<table/>", |
| "html": null, |
| "text": "Number of sentences available for kNN-MIL training. The Rubric column shows the number of reference sentences taken from the rubric, while the Student column shows the number manually chosen from the student essays. The Training column shows the total number of sentences in the full set of essays." |
| }, |
| "TABREF4": { |
| "type_str": "table", |
| "num": null, |
| "content": "<table><tr><td>Model</td><td>agg</td><td colspan=\"3\">Space Exposure Therapy Piaget Stages Sensory Study Sleep Stages</td></tr><tr><td>Base kNN-MIL</td><td>max mean</td><td>sbert 0.49 (0.14) tfidf 0.47 (0.12) sbert 0.39 (0.15) tfidf 0.40 (0.14)</td><td>0.51 (0.18) 0.41 (0.15) 0.61 (0.19) 0.52 (0.17) 0.44 (0.16) 0.36 (0.15) 0.52 (0.16) 0.46 (0.14)</td><td>0.60 (0.11) 0.67 (0.12) 0.61 (0.14) 0.63 (0.13)</td></tr><tr><td>Manual kNN-MIL</td><td>max mean</td><td>sbert 0.41 (0.15) tfidf 0.38 (0.14) sbert 0.29 (0.15) tfidf 0.29 (0.16)</td><td>0.30 (0.18) 0.25 (0.15) 0.40 (0.15) 0.23 (0.16) 0.23 (0.15) 0.16 (0.15) 0.29 (0.13) 0.19 (0.16)</td><td>0.37 (0.14) 0.34 (0.18) 0.27 (0.14) 0.22 (0.20)</td></tr><tr><td>OLR</td><td/><td>0.50 (0.18)</td><td>0.63 (0.16) 0.51 (0.18)</td><td>0.69 (0.14)</td></tr></table>", |
| "html": null, |
| "text": "Area under the ROC curve on the annotation prediction task, averaged over all topics and annotators. Standard deviation shown in parentheses." |
| }, |
| "TABREF5": { |
| "type_str": "table", |
| "num": null, |
| "content": "<table/>", |
| "html": null, |
| "text": "Pearson correlation coefficients on the document-level scoring task, averaged over all topics. Standard deviation shown in parentheses." |
| } |
| } |
| } |
| } |