| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T01:10:37.719033Z" |
| }, |
| "title": "Automated Classification of Written Proficiency Levels on the CEFR-Scale through Complexity Contours and RNNs", |
| "authors": [ |
| { |
| "first": "Elma", |
| "middle": [], |
| "last": "Kerz", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "RWTH Aachen University", |
| "location": {} |
| }, |
| "email": "elma.kerz@ifaar.rwth-aachen.de" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Wiechmann", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Amsterdam", |
| "location": {} |
| }, |
| "email": "d.wiechmann@uva.nl" |
| }, |
| { |
| "first": "Yu", |
| "middle": [], |
| "last": "Qiao", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "RWTH Aachen University", |
| "location": {} |
| }, |
| "email": "yu.qiao@rwth-aachen.de" |
| }, |
| { |
| "first": "Emma", |
| "middle": [], |
| "last": "Tseng", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Washington", |
| "location": {} |
| }, |
| "email": "eftseng@uw.edu" |
| }, |
| { |
| "first": "Marcus", |
| "middle": [], |
| "last": "Str\u00f6bel", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "RWTH Aachen University", |
| "location": {} |
| }, |
| "email": "marcus.stroebel@rwth-aachen.de" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Automatically predicting the level of second language (L2) learner proficiency is an emerging topic of interest and research based on machine learning approaches to language learning and development. The key to the present paper is the combined use of what we refer to as 'complexity contours', a series of measurements of indices of L2 proficiency obtained by a computational tool that implements a sliding window technique, and recurrent neural network (RNN) classifiers that adequately capture the sequential information in those contours. We used the EF-Cambridge Open Language Database (Geertzen et al., 2014) with its labelled Common European Framework of Reference (CEFR) levels (Council of Europe, 2018) to predict six classes of L2 proficiency levels (A1, A2, B1, B2, C1, C2) in the assessment of writing skills. Our experiments demonstrate that an RNN classifier trained on complexity contours achieves higher classification accuracy than one trained on text-average complexity scores. In a secondary experiment, we determined the relative importance of features from four distinct categories through a sensitivity-based pruning technique. Our approach makes an important contribution to the field of automated identification of language proficiency levels, more specifically, to the increasing efforts towards the empirical validation of CEFR levels.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Automatically predicting the level of second language (L2) learner proficiency is an emerging topic of interest and research based on machine learning approaches to language learning and development. The key to the present paper is the combined use of what we refer to as 'complexity contours', a series of measurements of indices of L2 proficiency obtained by a computational tool that implements a sliding window technique, and recurrent neural network (RNN) classifiers that adequately capture the sequential information in those contours. We used the EF-Cambridge Open Language Database (Geertzen et al., 2014) with its labelled Common European Framework of Reference (CEFR) levels (Council of Europe, 2018) to predict six classes of L2 proficiency levels (A1, A2, B1, B2, C1, C2) in the assessment of writing skills. Our experiments demonstrate that an RNN classifier trained on complexity contours achieves higher classification accuracy than one trained on text-average complexity scores. In a secondary experiment, we determined the relative importance of features from four distinct categories through a sensitivity-based pruning technique. Our approach makes an important contribution to the field of automated identification of language proficiency levels, more specifically, to the increasing efforts towards the empirical validation of CEFR levels.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The Common European Framework of Reference (CEFR) is an internationally recognized standard for describing language proficiency based on six reference levels -A1, A2, B1, B2, C1 and C2 -the same letter pairs corresponding to a three level distinction between beginner, intermediate and advanced (Council of Europe, 2018) . Each proficiency level is related to specific linguistic features and skills, establishing a progression from rudimentary language to varied and sophisticated language. The CEFR descriptors, available for all four fundamental language skills (receptive skills: reading & listening and productive skills: writing & speaking), describe the expected competencies in terms of functional can-do statements. For example, a learner at a vantage or upper intermediate B2 level in the domain of writing is expected to have \"a sufficient range of language to be able to give clear descriptions, express viewpoints and develop arguments without much conspicuous searching for words, using some complex sentence forms to do so\" (Council of Europe, 2018: 131). The can-do descriptors formulated for each of the six CEFR proficiency levels are typically vague and subjective and are only useful for orientation purposes. Thus, there is an urgent need for research on empirical validation of CEFR levels at the interface between areas of language learning, testing and assessment, and natural language processing and machine learning (Wisniewski, 2017) .", |
| "cite_spans": [ |
| { |
| "start": 295, |
| "end": 320, |
| "text": "(Council of Europe, 2018)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 1442, |
| "end": 1460, |
| "text": "(Wisniewski, 2017)", |
| "ref_id": "BIBREF57" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "A closely related line of research has been directed toward automated essay scoring (AES) (for overviews see (Higgins et al., 2015; Ke and Ng, 2019; Klebanov and Madnani, 2020) ). This line of research has benefited from the increasing availability of publicly accessible learner corpora of L2 writing, such as CLC-FCE (Yannakoudakis et al., 2011) , TOEFL11 (Blanchard et al., 2013) , MERLIN (Boyd et al., 2014) and EFCAMDAT (Geertzen et al., 2014) . Supervised approaches to AES have recast the task as (1) a regression task aimed at predicting the score of an essay (Yannakoudakis et al., 2011; Klebanov and Flor, 2013) , (2) a classification task aimed at classifying a text as belonging to one of a specified number of classes, e.g. the three score levels (low, medium or high) in the TOEFLL11 corpus or the six CEFR levels (A1-C2) in the MERLIN corpus (Hancke and Meurers, 2013; Vajjala and Rama, 2018; Wei\u00df and Meurers, 2018; Caines and Buttery, 2020) 1 or (3) a ranking task aimed at ranking two or more texts based on their quality (Yannakoudakis and Briscoe, 2012; Taghipour and Ng, 2016) . Previous work on AES has taken both feature-based approaches and neural approaches (see (Ke and Ng, 2019) for a discussion of (dis)advantages of these two approaches). The features employed are diverse, ranging from the use of descriptive metrics of the text related to word or sentence length to more abstract features related to proficiency development in the area of (second) language learning ( (Vajjala, 2018) for a recent overview). The existing studies that have used a feature-based approach have typically relied on text averages of a given feature. However, the use of such aggregate scores may obscure the considerable degree of variation in distribution of feature values within the text.", |
| "cite_spans": [ |
| { |
| "start": 109, |
| "end": 131, |
| "text": "(Higgins et al., 2015;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 132, |
| "end": 148, |
| "text": "Ke and Ng, 2019;", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 149, |
| "end": 176, |
| "text": "Klebanov and Madnani, 2020)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 319, |
| "end": 347, |
| "text": "(Yannakoudakis et al., 2011)", |
| "ref_id": "BIBREF60" |
| }, |
| { |
| "start": 358, |
| "end": 382, |
| "text": "(Blanchard et al., 2013)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 392, |
| "end": 411, |
| "text": "(Boyd et al., 2014)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 425, |
| "end": 448, |
| "text": "(Geertzen et al., 2014)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 568, |
| "end": 596, |
| "text": "(Yannakoudakis et al., 2011;", |
| "ref_id": "BIBREF60" |
| }, |
| { |
| "start": 597, |
| "end": 621, |
| "text": "Klebanov and Flor, 2013)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 857, |
| "end": 883, |
| "text": "(Hancke and Meurers, 2013;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 884, |
| "end": 907, |
| "text": "Vajjala and Rama, 2018;", |
| "ref_id": "BIBREF54" |
| }, |
| { |
| "start": 908, |
| "end": 931, |
| "text": "Wei\u00df and Meurers, 2018;", |
| "ref_id": "BIBREF56" |
| }, |
| { |
| "start": 932, |
| "end": 957, |
| "text": "Caines and Buttery, 2020)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 1040, |
| "end": 1073, |
| "text": "(Yannakoudakis and Briscoe, 2012;", |
| "ref_id": "BIBREF59" |
| }, |
| { |
| "start": 1074, |
| "end": 1097, |
| "text": "Taghipour and Ng, 2016)", |
| "ref_id": "BIBREF49" |
| }, |
| { |
| "start": 1188, |
| "end": 1205, |
| "text": "(Ke and Ng, 2019)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 1499, |
| "end": 1514, |
| "text": "(Vajjala, 2018)", |
| "ref_id": "BIBREF52" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we present experiments geared towards the automated assessment of written language proficiency of non-native learners (L2) of English. For the experiments, we take advantage of the EF-Cambridge Open Language Database (EF-CAMDAT, (Geertzen et al., 2014) ), a large-scale learner corpus consisting of 1.8 million texts labeled with the six CEFR proficiency levels (A1-C2). The aim of the paper is twofold: (1) to apply a sliding window technique in a feature-based modeling approach to automated proficiency classification and (2) to determine what features contribute the most to the classification accuracy. The features employed in this paper are derived from numerous studies in the field of L2 acquisition centering around the notion of 'complexity' 2 (see e.g. (Lu, 2010a (Lu, , 2012 Bult\u00e9 and Housen, 2012) ). The inclusion of such features is further motivated by the fact that, according to the CEFR descriptors, learners are expected to acquire the ability to produce increasingly varied and sophisticated written language, as they progress through the six proficiency levels. Such diverse and sophisticated language use should be evident not only in vocabulary growth, but also in the choice of individual 1 The latter paper has been published in the context of a recent shared task on Language Proficiency Scoring at the LREC 2020 -REPROLANG Task D.2 https://lrec2020.lrecconf.org/en/reprolang2020/selected-tasks/", |
| "cite_spans": [ |
| { |
| "start": 244, |
| "end": 267, |
| "text": "(Geertzen et al., 2014)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 780, |
| "end": 790, |
| "text": "(Lu, 2010a", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 791, |
| "end": 802, |
| "text": "(Lu, , 2012", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 803, |
| "end": 826, |
| "text": "Bult\u00e9 and Housen, 2012)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 1230, |
| "end": 1231, |
| "text": "1", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "2 Complexity -commonly defined as \"the range of forms that surface in language production and the degree of sophistication of such forms\" (Ortega 2003:492) -is one of the three dimensions of the 'Complexity-Accuracy-Fluency' triad that has emerged as a prominent conceptual framework for L2 assessment (see e.g. (Wolfe-Quintero et al., 1998; Larsen-Freeman, 2006) words and multi-word phrases, and in the complexity of sentence, clause, and phrase structures. Through the sliding window technique we obtain a series of measurements for a given feature tracking the progression of complexity within a text in a sentence-by-sentence fashion. We refer to such series of measurements as 'complexity contours'. These contours are then fed into recurrent neural network (RNN) classifiers -adequate to take into account the sequential information in the contours -to perform grade-level classification tasks. We demonstrate the utility of the approach by comparing the performance of 'contour-based' RNN models against those of 'means-based' RNN models trained on text-average performance scores.", |
| "cite_spans": [ |
| { |
| "start": 138, |
| "end": 155, |
| "text": "(Ortega 2003:492)", |
| "ref_id": null |
| }, |
| { |
| "start": 312, |
| "end": 341, |
| "text": "(Wolfe-Quintero et al., 1998;", |
| "ref_id": "BIBREF58" |
| }, |
| { |
| "start": 342, |
| "end": 363, |
| "text": "Larsen-Freeman, 2006)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In a second step, we determine what features drive classification accuracy through a Sensitivity-Based Pruning (SBP) technique. The approach taken in this paper was already successfully applied in the area of first language (L1) writing development. Kerz et al. (2020) showed that RNN classifiers trained on complexity contours achieve higher classification accuracy in predicting secondary school children's grade levels in both English and German (second-, sixth-, ninth-and eleventh-grade in English schools and fifth-and ninth-grade in German). Here we set out to extend the approach to automated proficiency classification in L2 English. The remainder of the paper is organized as follows: In Section 2, we provide a concise overview of related work. Section 3 presents the dataset and Section 4 the features used in the experiments. Section 5 describes the sliding-window approach to generating complexity contours. Sections 6 presents the architecture of the RNNs and the training procedure. Sections 7 introduces the SBP method used to determine the relative feature importance. Section 8 reports the results before conclusions are drawn in Section 9 along with indications of future research directions.", |
| "cite_spans": [ |
| { |
| "start": 250, |
| "end": 268, |
| "text": "Kerz et al. (2020)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this section, we present two types of previous work: L2 studies that have investigated the relationship between certain linguistic features and proficiency levels, and those that have used supervised machine learning approaches to predict learner proficiency on the CEFR scale.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Numerous studies reveal that syntactic complexity can be considered as one of the key skills that strongly influence L2 proficiency (see e.g. (Ortega, 2003) for a synthesis of twenty-five studies, see also (Kuiken et al., 2019) for a recent special issue). These studies have measured this multidimensional construct along both global features, such as length measures and subordination ratios, as well as more specific features pertaining to the usage of particular structures. For example, Lu (2011) conducted an evaluation of 14 features of syntactic complexity in a corpus of English essays written by Chinese L1 students and found that the complexity measures that best discriminated between proficiency levels were the number of complex nominals per sentence and the mean sentence length. Another series of studies indicate the importance of lexical complexity (aka lexical richness) subsuming its three sub-dimensions (lexical density, sophistication and variation) in the assessment of L2 proficiency (see (Lu, 2012) ). For example, Kyle and Crossley (2014) showed 47.5% of the variance in holistic scores of lexical proficiency and 48.7% of the variance in holistic scores of speaking proficiency can be explained using a range of lexical sophistication indices. This study also introduced the use of multi-word sequences (MWS) as an indicator of language proficiency, operationalized in terms of register-specific n-gram measures (bigrams and trigrams). The inclusion of such features reflects the growing interest of MWS in language learning and development. This interest stems from an extensive body of evidence demonstrating that both child and adult populations, including adult second-language learner populations, can develop the sensitivity to the statistics of MWS and rely on knowledge of such statistics to facilitate their language processing and boost their acquisition (for overviews see e.g. (Shaoul and Westbury, 2011 ; Christiansen and Arnon, 2017)). 
Garner et al. (2020) , for instance, investigated the relationship of the usage of MWS and human judgments of writing proficiency based on the CEFR-graded Yonsei English Learner Corpus (Rhee and Jung, 2014) and found that essays from higher CEFR levels include a greater proportion of frequent academic trigrams and more strongly associated spoken trigrams. Finally, in recent years the L2 literature has introduced information-theoretic features based on Kolmogorov complexity (Ehret, 2016; Ehret and Szmrecsanyi, 2019) . Ehret and Szmrecsanyi (2019) investigated essays written by advanced learners of English from International Corpus of Learner English (Granger et al., 2002) and showed that more advanced learners use considerably more complex texts than beginner learners, although this tendency is not always reflected in a clear, linear relationship between proficiency and complexity.", |
| "cite_spans": [ |
| { |
| "start": 142, |
| "end": 156, |
| "text": "(Ortega, 2003)", |
| "ref_id": "BIBREF41" |
| }, |
| { |
| "start": 206, |
| "end": 227, |
| "text": "(Kuiken et al., 2019)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 1014, |
| "end": 1024, |
| "text": "(Lu, 2012)", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 1041, |
| "end": 1065, |
| "text": "Kyle and Crossley (2014)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 1917, |
| "end": 1943, |
| "text": "(Shaoul and Westbury, 2011", |
| "ref_id": "BIBREF47" |
| }, |
| { |
| "start": 1978, |
| "end": 1998, |
| "text": "Garner et al. (2020)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 2456, |
| "end": 2469, |
| "text": "(Ehret, 2016;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 2470, |
| "end": 2498, |
| "text": "Ehret and Szmrecsanyi, 2019)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 2501, |
| "end": 2529, |
| "text": "Ehret and Szmrecsanyi (2019)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 2635, |
| "end": 2657, |
| "text": "(Granger et al., 2002)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Studies that have employed supervised machine learning approaches to predict proficiency on the CEFR scale for different L2s have used numerous linguistic features in combination with a host of classifiers (Hancke and Meurers, 2013; Vajjala and Rama, 2018; Vajjala and L\u00f5o, 2014; Caines and Buttery, 2020) . The classification accuracy reported in these studies ranged between 62.7% and 83.8%. Hancke and Meurers (2013) reached a classification accuracy of 62.7% in predicting five (out of six) CEFR levels of professionally rated free text essays from the MERLIN database comprising CEFR exams taken by second language learners of German based on a total of 3821 lexical, morphological, and syntactic features using the Sequential Minimal Optimization (SMQ) algorithm implemented in WEKA. Using the same SMQ algorithm, achieved an accuracy of 67% in correctly identifying the CEFR level of L2 Swedish learner essays on the basis of 61 count-based, lexical, syntactic, morphological, and semantic features extracted from the linguistic annotation available in the SweLL corpus 3 . achieved 70% accuracy in predicting CEFR-levels of L1 French and Spanish L2 English users on the basis of manually annotated errors in the L1 French and Spanish subsets of the EFCAMDAT corpus. Vajjala and L\u00f5o (2014) reported a classification accuracy of 79% in an experiment on the Estonian Interlanguage Corpus 4 .", |
| "cite_spans": [ |
| { |
| "start": 206, |
| "end": 232, |
| "text": "(Hancke and Meurers, 2013;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 233, |
| "end": 256, |
| "text": "Vajjala and Rama, 2018;", |
| "ref_id": "BIBREF54" |
| }, |
| { |
| "start": 257, |
| "end": 279, |
| "text": "Vajjala and L\u00f5o, 2014;", |
| "ref_id": "BIBREF53" |
| }, |
| { |
| "start": 280, |
| "end": 305, |
| "text": "Caines and Buttery, 2020)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 394, |
| "end": 419, |
| "text": "Hancke and Meurers (2013)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 1274, |
| "end": 1296, |
| "text": "Vajjala and L\u00f5o (2014)", |
| "ref_id": "BIBREF53" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In another study, Vajjala and Rama (2018) performed experiments with cross-lingual and multilingual classifiers on individual language classification. The data used in their study included 2,286 manually graded texts (five levels, A1 to C1) from the MERLIN learner corpus (German, 1,029 texts; Italian, 803 texts, and Czech, 434 texts). Trained on a wide range of feature, such as word and POS n-grams, task-specific word and character embeddings, dependency n-grams, features pertaining to lexical richness and error features, their classification models obtained an accuracy of 0.68 for German, 0.84 for Italian and 0.73 for Czech for monolingual classification. For multilingual classification, their models reached classification accuracy up to 0.73. The data set and findings ob-tained in this study have served as the baseline for the REPROLANG 2020 shared task on 'Language proficiency scoring' 5 . In the context of this task, Caines and Buttery (2020) reproduce and extend the finding described in (Vajjala and Rama, 2018) reaching a classification accuracy of up to 83.8% for the Italian component. Their results indicate that feature-based approaches perform better than neural network classifiers for text datasets of the given size.", |
| "cite_spans": [ |
| { |
| "start": 18, |
| "end": 41, |
| "text": "Vajjala and Rama (2018)", |
| "ref_id": "BIBREF54" |
| }, |
| { |
| "start": 935, |
| "end": 960, |
| "text": "Caines and Buttery (2020)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 1007, |
| "end": 1031, |
| "text": "(Vajjala and Rama, 2018)", |
| "ref_id": "BIBREF54" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The data come from the EFCAMDAT, an open access corpus compiled at Cambridge University in collaboration with EF Education First, an international school of English as a second/foreign language (Geertzen et al., 2014) . The corpus consists of writing assignments submitted to the Englishtown, the online school of EF Education First, summing up to a total of 1,180,309 individual writing samples by 174,743 L2 learners. The curriculum of Englishtown covers all six proficiency levels, from CEFR A1 to C2 organized along 16 EF teaching levels with each level subsuming 8 teaching units and ending with an open-ended writing task (128 distinct writing assignments). The length of the writing samples in the corpus increases monotonically with Englishtown levels, ranging from an average of 30.1 words at the lowest level (1) to an average of 170 words at the highest level (16). Since one of our main aims is to demonstrate the usefulness of the sliding window technique and the inclusion of a set of measurements per individual feature (complexity contours), we filtered the original dataset to obtain texts containing at least 100 words. This resulted in a total of 163,657 writing samples. In addition, we removed texts that had received exceptionally low scores on writing performance, since their inclusion would add bias and variance and may skew the results (B\u00f8velstad et al., 2017). Specifically, texts whose writing score fell more than 1.5 times the interquartile range below the first quartile, corresponding to a threshold score of 75%, were removed, which resulted in a loss of 7% of the data. The final dataset comprised a total of 152,314 individual learner texts. whose distributions across CEFR levels along with associated text length statistics are shown in Table 1 . ", |
| "cite_spans": [ |
| { |
| "start": 194, |
| "end": 217, |
| "text": "(Geertzen et al., 2014)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1775, |
| "end": 1782, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The 57 features used in this paper fall into four distinct groups: (1) measures of syntactic complexity, (2) measures of lexical richness, (3) measures pertaining to the usage of multi-word sequences (MWS) and (4) information-theoretic measures.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The first group consists of 16 features used in the past to measure syntactic complexity in writing and its relation to writing proficiency reviewed in Section 2. These features are implemented based on descriptions in Lu (2010b) and using the Tregex tree pattern matching tool (Levy and Andrew, 2006) with syntactic parse trees for extracting specific patterns. The second group subsumes 13 features pertaining to lexical richness: five measures of lexical variation, one measure of lexical density, seven measures of lexical sophistication. The operationalizations of these measures follow those described in Lu (2012) and (Str\u00f6bel, 2014) . The third group includes 25 n-gram frequency features that are derived from the five register sub-components of the Contemporary Corpus of American English (COCA, (Davies, 2008) ): spoken, magazine, fiction, news and academic language 6 . Our frequency n-gram measures differ from those used in the earlier studies reviewed in Section 2. Instead of using only bigrams and trigrams, we extend them to include longer word combinations (four-and fivegrams) and use a more nuanced definition to operationalize the usage of such combinations given in equation 1:", |
| "cite_spans": [ |
| { |
| "start": 278, |
| "end": 301, |
| "text": "(Levy and Andrew, 2006)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 611, |
| "end": 620, |
| "text": "Lu (2012)", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 625, |
| "end": 640, |
| "text": "(Str\u00f6bel, 2014)", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 806, |
| "end": 820, |
| "text": "(Davies, 2008)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Norm n,s,r = |C n,s,r | \u2022 log h Q c2|Cn,s,r| freq n,r (c) i |U n,s |", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "4" |
| }, |
| { |
| "text": "(1) Let A n,s be the list of n-grams (n 2 [1, 5]) appearing within a sentence s, B n,r the list of n-gram appearing in the n-gram frequency list of register r (r 2 {acad, fic, mag, news, spok}) and C n,s,r = A n,s \\ B n,r the list of n-grams appearing both in s and the n-gram frequency list of register r. U n,s is defined as the list of unique n-gram in s, and freq n,r (a) the frequency of n-gram a according to the n-gram frequency list of register r.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "4" |
| }, |
| { |
| "text": "A total of 25 measures results from the combination of (a) a 'reference list' containing the top 100,000 most frequent n-grams and their frequencies from one of five register subcomponents of the COCA corpus and (b) the size of the n-gram (n 2 [1, 5]). The fourth group includes three information-theoretic measures that are based on Kolmogorov complexity. These measures use the Deflate algorithm (Deutsch, 1996) to compress a text and obtain complexity scores by relating the size of the compressed file to the size of the original file (for the operationalization and implementation of these measures see (Str\u00f6bel, 2014) ).", |
| "cite_spans": [ |
| { |
| "start": 398, |
| "end": 413, |
| "text": "(Deutsch, 1996)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 608, |
| "end": 623, |
| "text": "(Str\u00f6bel, 2014)", |
| "ref_id": "BIBREF48" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Text complexity of the writing samples was automatically assessed using the CoCoGen, a computational tool that implements a sliding-window technique to generate a series of measurements for a given complexity measure (CM) (Str\u00f6bel 2014) . In contrast to the standard approach that represents text complexity as a single score, providing a 'global assessment' of the complexity of a text, the use of a sliding-window technique enables a 'local' (sentence-level) assessment of complexity within a text. A sliding window can be conceived of as a window of size ws, which is defined by the number of sentences it contains. The window is moved across a text sentence-by-sentence, computing one value per window for a given CM. The series of measurements generated by CoCoGen captures the progression of linguistic complexity within a text for a given CM and is referred to here as a 'complexity contour' (see Figure 1) . To compute the complexity score of a given window, a measurement function is applied to each sentence in the window. The size of the window (ws) is a user-defined parameter whose optimal value depends on the goals of the analysis: When complexity is measured for each sentence, i.e. ws = 1, the resulting complexity contour will typically exhibit many sharp turns. By increasing the window size, i.e. the number of sentences in a window, the complexity contour can be smoothened akin to a moving average technique. 7 In this paper, the window size parameter was set to one sentence, meaning that no smoothing of the curve was performed. Figure 1 illustrates complexity contours on three randomly selected texts across three CEFR levels (A2, B2 and C2) for eight selected complexity measures. CoCoGen uses the Stanford CoreNLP suite (Manning et al., 2014) for performing tokenization, sentence splitting, part-of-speech tagging, lemmatization and syntactic parsing (Probabilistic Context Free Grammar Parser (Klein and Manning, 2003) ).", |
| "cite_spans": [ |
| { |
| "start": 222, |
| "end": 236, |
| "text": "(Str\u00f6bel 2014)", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 1748, |
| "end": 1770, |
| "text": "(Manning et al., 2014)", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 1923, |
| "end": 1948, |
| "text": "(Klein and Manning, 2003)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 904, |
| "end": 913, |
| "text": "Figure 1)", |
| "ref_id": null |
| }, |
| { |
| "start": 1553, |
| "end": 1561, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "A Sliding-Window Approach", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We used a Recurrent Neural Network (RNN) classifier, specifically a dynamic RNN model with Gated Recurrent Unit (GRU) cells (Cho et al., 2014) . A dynamic RNN was chosen as it can handle sequences of variable length 8 . As shown in Figure 2 , the input of the contour-based model is a sequence X = (x 1 , x 2 , . . . , x l , x l+1 , . . . , x n ), where x i , the output of CoCoGen for the ith window of a document, is a 57 dimensional vector, l is the length of the sequence, n \u2208 Z is a number, which is greater or equal to the length of the longest sequence in the data and x l+1 , \u2022 \u2022 \u2022 , x n are padded 0-vectors. The input of the contour-based model was fed into a RNN that consists of two layers of GRU cells with 200 hidden units for each. To predict the class of a sequence, the last output of the RNN, i.e. the output of the RNN right after the feeding of x l , is transformed through a feed-forward neural network. The feed-forward neural-network consists of three fully connected layers, whose output dimensions are 512, 256, and 6 respectively. The Rectifier Linear Unit (ReLU) was used as an activation function. Before the final output, a softmax layer was applied. For the mean-based model, we used the same neural network as in the contour-based model, except that the network was trained with vectors of text-average complexity scores. The models were implemented using PyTorch (Pytorch, 2019) .", |
| "cite_spans": [ |
| { |
| "start": 124, |
| "end": 142, |
| "text": "(Cho et al., 2014)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 1395, |
| "end": 1410, |
| "text": "(Pytorch, 2019)", |
| "ref_id": "BIBREF44" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 232, |
| "end": 240, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Classification Models", |
| "sec_num": "6" |
| }, |
| { |
| "text": "We evenly split our data into 10 folds and applied a 10-fold cross validation, i.e. each time a fold (10% of data) is taken out as test set and the rest (90% of data) are used as the training set. In both 7 When the window size is specified to be greater than 1, CoCoGen returns complexity scores for a given measure as fractions (wnm/wdm). In this case, the denominators and numerators of the fractions from the first to the last sentence in the window are added up to form the denominator and numerator of the resulting complexity score of a given window.", |
| "cite_spans": [ |
| { |
| "start": 205, |
| "end": 206, |
| "text": "7", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Classification Models", |
| "sec_num": "6" |
| }, |
| { |
| "text": "NPpost NPpre ANC bigramAcad bigramFiction bigramSpoken 1 2 3 4 5 6 7 8 9 10 1 2 3 4 5 6 7 8 9 10 1 2 3 4 5 6 7 8 9 10 1 2 3 4 5 6 7 8 9 10 1 2 3 4 5 6 7 8 9 10 1 2 3 4 5 6 7 8 9 10 1 2 3 4 5 6 7 8 9 10 1 2 3 4 5 6 7 8 9 10 Figure 1 : Complexity contours (window size = 1 sentence) for eight selected measures of complexity for three random texts from CEFR levels A2, B2 and C2. datasets, the distributions of classes were identical. As the loss function for training, cross entropy was used:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 223, |
| "end": 231, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "CompNom cTTR", |
| "sec_num": null |
| }, |
| { |
| "text": "GRU GRU x 1 GRU GRU x 2 h 11 h 21 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 h 12 h 22 GRU GRU x l h 1,l 1 h 2,l 1 \u2022 \u2022 \u2022 \u2022 \u2022 \u2022 h 1l h 2l GRU GRU x n h 1,n 1 h 2,n 1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "CompNom cTTR", |
| "sec_num": null |
| }, |
| { |
| "text": "L(\\hat{Y}, c) = -\\sum_{i=1}^{C} p(y_i) \\log(p(\\hat{y}_i))", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "CompNom cTTR", |
| "sec_num": null |
| }, |
| { |
| "text": "in which c is the true class label of the current observation, C is the number of classes, (p(y 1 ), . . . , p(y C )) is a one-hot vector with", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "CompNom cTTR", |
| "sec_num": null |
| }, |
| { |
| "text": "p(y_i) = \\begin{cases} 1 & i = c \\\\ 0 & \\text{otherwise} \\end{cases}, and \\hat{Y} = (p(\\hat{y}_1), p(\\hat{y}_2), \\ldots, p(\\hat{y}_C))", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "CompNom cTTR", |
| "sec_num": null |
| }, |
| { |
| "text": "is the output vector of the softmax layer, which can be viewed as the predicted probabilities of the observed instance falling into each of the classes. Since the EFCamDat dataset is highly imbalanced, we additionally assigned weights to the classes for the cross entropy function, such that a k is the weight for class k:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "CompNom cTTR", |
| "sec_num": null |
| }, |
| { |
| "text": "a_k = \\frac{N}{100 N_k}", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "CompNom cTTR", |
| "sec_num": null |
| }, |
| { |
| "text": "where N is the total number of instances in the dataset and N k is the number of instances in the dataset with label k. For optimization, we used Stochastic Gradient Descent (SGD) with a learning rate \u03b7 = 0.01, momentum = 0.9 and a learning rate decay factor of 0.1. The minibatch size is 32, which was shown as a reasonable value for modern GPUs (Masters and Luschi, 2018) .", |
| "cite_spans": [ |
| { |
| "start": 347, |
| "end": 373, |
| "text": "(Masters and Luschi, 2018)", |
| "ref_id": "BIBREF39" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "CompNom cTTR", |
| "sec_num": null |
| }, |
| { |
| "text": "To determine the relative importance of the individual features, we conducted feature ablation experiments for the contour-based RNN. Classical forward or backward sequential selection algorithms that proceed by sequentially adding or discarding features require a quadratic number of model training and evaluation in order to obtain a feature ranking (Langley, 1994) . In the context of neural network model training a quadratic number of models can become prohibitive. To alleviate this problem, we used an adapted version of the iterative sensitivity-based pruning algorithm proposed by D\u00edaz-Villanueva et al. (2010) . This algorithm ranks the features based on a 'sensitivity measure' (Moody, 1994; Utans and Moody, 1991) and removes the least relevant variables one at a time. The classifier is then retrained on the resulting subset and a new ranking is calculated over the remaining features. This process is repeated until all features are removed (see Algorithm 1). In this fashion, rather than training n(n+1)/2 required for sequential algorithms, the number of models trained is reduced to n/m, where m is the number of features that can be removed at each step. We report the results obtained with m = 1, i.e. the removal of a single feature at each step. The procedure of finding the rank order of feature importance is described as follows. To increase the robustness of the feature importance rank order, k-fold cross-validation is applied. At step t, neural network models M t,n , n \u2208 {1, . . . , k} are trained on the training sets of a k-fold cross-validation, where n is the fold ID. The training sets at step t consist of instances with feature set F t = {f 1 , f 2 , . . . , f Dt } where f 1 , . . . , f Dt are the remaining features at the current step, whose importance rank is to be determined. 
We define X t,n as the test set of the nth fold with feature set F t and X i t,n as the same dataset as X t,n except we set the ith feature f i of each instance within the dataset to its average. Furthermore, we define g(X) as the classification accuracy of M t,n for a dataset X. The sensitivity of feature f i on the nth fold at step t is obtained from:", |
| "cite_spans": [ |
| { |
| "start": 352, |
| "end": 367, |
| "text": "(Langley, 1994)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 590, |
| "end": 619, |
| "text": "D\u00edaz-Villanueva et al. (2010)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 689, |
| "end": 702, |
| "text": "(Moody, 1994;", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 703, |
| "end": 725, |
| "text": "Utans and Moody, 1991)", |
| "ref_id": "BIBREF51" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Ablation", |
| "sec_num": "7" |
| }, |
| { |
| "text": "S_{i,t,n} = g(X_{t,n}) - g(X^{i}_{t,n})", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Ablation", |
| "sec_num": "7" |
| }, |
| { |
| "text": "The final sensitivity for a feature f i at step t is:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Ablation", |
| "sec_num": "7" |
| }, |
| { |
| "text": "S_{i,t} = \\frac{1}{k} \\sum_{n=1}^{k} S_{i,t,n}", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Ablation", |
| "sec_num": "7" |
| }, |
| { |
| "text": "The most important feature at step t can be found by: f\u00ee :\u00ee = arg max", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Ablation", |
| "sec_num": "7" |
| }, |
| { |
| "text": "i : f i \u2208 F t (S i,t )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Ablation", |
| "sec_num": "7" |
| }, |
| { |
| "text": "Then we set the rank for feature f\u00ee: Rank\u00ee = t. In the end, feature f\u00ee is dropped from F t and the corresponding columns in training and test datasets are also dropped simultaneously:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Ablation", |
| "sec_num": "7" |
| }, |
| { |
| "text": "F_{t+1} = F_t \\setminus \\{f_{\\hat{i}}\\}. This procedure is repeated until |F_{t'}| = 1.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Ablation", |
| "sec_num": "7" |
| }, |
| { |
| "text": "An overview of the performance statistics of the models in terms of precision, recall and F1 scores is presented in Table 2 . The classification accuracy results indicated that the inclusion of complexity contours led to an increase in overall classification accuracy of 9.3% from 66.1% for the means-based RNN model to 75.4% for the contour-based RNN model. Classification performance of the contourbased RNN model was consistently higher than those of the means-based RNN model across all six CEFR proficiency levels. Its performance was higher for the beginner and intermediate CEFR proficiency levels (A1 to B2) with F1 scores ranging between 0.73 and 0.81 compared to the advanced levels (C1 and C2) with F1 scores dropping to 0.61 Table 3 . As is evident in this table, most classification errors appeared in adjacent categories, with few classification errors occurring between distant categories. The top 20 features that contributed most to the classification accuracy of the contour-based RNN model are shown in Table 2 (see the column 'Acc after Del'). The results of the feature ablation experiments revealed that classification accuracy was mainly driven by frequency n-grams measures pertaining to the usage of multiword sequences (MWS). The twelve of the top 20 features are uni-, bi-, and trigram frequency measures from all five register sub-components of the COCA corpus. Writing samples from higher CEFR levels exhibit higher scores for all five unigram measures. A similar pattern can be observed for bigram scores from the academic register. A more differentiated pattern is apparent in trigram measures: For example, trigram scores from the fiction register show a U-shaped progression, such that they first increase up to the B2 level and then decrease (see Table 5 and Figure 3 in the Appendix for an overview). Overall, these findings indicate the importance of including n-gram frequency measures for the task of automated language performance classification. 
Moreover, they are consistent with results reported Table 3 : Confusion matrix of the contour-based RNN model (sum across 10-fold cross validation). The C i,j value is the number of predictions known to be in group i and predicted to be in group j. in numerous studies indicating that the knowledge of MWS is a key component of both L1 and L2 writing and speaking skills (see e.g. (Christiansen and Arnon, 2017; Garner et al., 2020; Saito, 2020) . Another group of features that figures prominently in the top 20 list are five measures of lexical sophistication: Higher CEFR levels are characterized by higher proportions of unusual/advanced words and words of greater surface length (compare same vs. equal vs. identical vs. tantamount). These results replicate and extend the findings reported in Durrant and Brenchley (2019) and (Kerz et al., 2020) . Both studies found that measures of lexical sophistication are good predictors of children's L1 writing development. And finally, the top-20 list includes two of the three information-theoretic measures, indicating that more advanced learners produce considerably more complex (i.e. informationally denser) texts than beginner learners. The fact that two measures from the smallest subset of CMs were ranked among the top-20 most important features is an indication of their usefulness in research on automated proficiency classification. As discussed in detail in Ehret (2016) and Ehret and Szmrecsanyi (2019) , CMs based on Kolmogorov complexity have the potential to avoid some of the known problems of traditional metrics that are based on different measures of unit length and that involve frequencies of various types of forms, which gives rise to 'concept reductionism' (Ortega, 2012, 128) .", |
| "cite_spans": [ |
| { |
| "start": 2368, |
| "end": 2398, |
| "text": "(Christiansen and Arnon, 2017;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 2399, |
| "end": 2419, |
| "text": "Garner et al., 2020;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 2420, |
| "end": 2432, |
| "text": "Saito, 2020)", |
| "ref_id": "BIBREF46" |
| }, |
| { |
| "start": 2786, |
| "end": 2814, |
| "text": "Durrant and Brenchley (2019)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 2819, |
| "end": 2838, |
| "text": "(Kerz et al., 2020)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 3406, |
| "end": 3418, |
| "text": "Ehret (2016)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 3423, |
| "end": 3451, |
| "text": "Ehret and Szmrecsanyi (2019)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 3718, |
| "end": 3737, |
| "text": "(Ortega, 2012, 128)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 116, |
| "end": 123, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| }, |
| { |
| "start": 737, |
| "end": 744, |
| "text": "Table 3", |
| "ref_id": null |
| }, |
| { |
| "start": 1022, |
| "end": 1029, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| }, |
| { |
| "start": 1781, |
| "end": 1789, |
| "text": "Table 5", |
| "ref_id": null |
| }, |
| { |
| "start": 1794, |
| "end": 1802, |
| "text": "Figure 3", |
| "ref_id": null |
| }, |
| { |
| "start": 2039, |
| "end": 2046, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results and Discussion", |
| "sec_num": "8" |
| }, |
| { |
| "text": "In this paper, we applied a sliding window technique in a feature-based modeling approach to automated classification of written proficiency levels on the CEFR-scale (A1-C2 levels). We made use of 'complexity contours' obtained through this technique to represent the distribution of scores per linguistic feature within a text in combination", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Outlook", |
| "sec_num": "9" |
| }, |
| { |
| "text": "https://spraakbanken.gu.se/eng/ research/icall/swellcorpus 4 http://evkk.tlu.ee/?language=en", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "http://www.lrec-conf.org/proceedings/ lrec2020/index.html", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "The Contemporary Corpus of American English is the largest genre-balanced corpus of American English, which at the time the measures were derived comprised of 560 million words.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "The lengths of the feature vector sequences depends on the number of sentences of the texts in our corpus.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "with RNN classifiers that exploit the sequential information in those contours. We demonstrated that an RNN classifier trained on complexity contours achieved higher classification accuracy across all six CEFR proficiency levels compared to one trained on text-average scores with an increase in performance of up to 14% in terms of precision and 15% in terms of recall. We also showed that iterative sensitivity-based pruning approach is a viable way of assessing relative feature importance in text classification tasks performed with neural network models. This approach taken in our paper has the potential to provide a valuable contribution to increasing efforts to identify 'criterial features', i.e. features that are characteristic and indicative of language proficiency at each level (Hawkins and Filipovi\u0107, 2012) . In our future work, we intend to include additional sets of features of language use based on crowd-sourced language metrics entitled word prevalence (Johns et al., 2020) as well as LIWC-style features that relate language use with behavioral and self-reported measures of personality, social behavior, and cognitive styles (Tausczik and Pennebaker, 2010) . We also intend to take into account the effects of task type on the features of language use investigated in this paper (Alexopoulou et al., 2017) .", |
| "cite_spans": [ |
| { |
| "start": 793, |
| "end": 822, |
| "text": "(Hawkins and Filipovi\u0107, 2012)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 1149, |
| "end": 1180, |
| "text": "(Tausczik and Pennebaker, 2010)", |
| "ref_id": "BIBREF50" |
| }, |
| { |
| "start": 1303, |
| "end": 1329, |
| "text": "(Alexopoulou et al., 2017)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "annex", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Task effects on linguistic complexity and accuracy: A largescale learner corpus analysis employing natural language processing techniques", |
| "authors": [ |
| { |
| "first": "Theodora", |
| "middle": [], |
| "last": "Alexopoulou", |
| "suffix": "" |
| }, |
| { |
| "first": "Marije", |
| "middle": [], |
| "last": "Michel", |
| "suffix": "" |
| }, |
| { |
| "first": "Akira", |
| "middle": [], |
| "last": "Murakami", |
| "suffix": "" |
| }, |
| { |
| "first": "Detmar", |
| "middle": [], |
| "last": "Meurers", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Language Learning", |
| "volume": "67", |
| "issue": "S1", |
| "pages": "180--208", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Theodora Alexopoulou, Marije Michel, Akira Mu- rakami, and Detmar Meurers. 2017. Task effects on linguistic complexity and accuracy: A large- scale learner corpus analysis employing natural lan- guage processing techniques. Language Learning, 67(S1):180-208.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Investigating the scope of textual metrics for learner level discrimination and learner analytics", |
| "authors": [ |
| { |
| "first": "Nicolas", |
| "middle": [], |
| "last": "Ballier", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Gaillat", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Learner Corpus Research Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nicolas Ballier and Thomas Gaillat. 2019. Investigat- ing the scope of textual metrics for learner level dis- crimination and learner analytics. In Learner Cor- pus Research Conference.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "A supervised learning model for the automatic assessment of language levels based on learner errors", |
| "authors": [ |
| { |
| "first": "Nicolas", |
| "middle": [], |
| "last": "Ballier", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Gaillat", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Simpkin", |
| "suffix": "" |
| }, |
| { |
| "first": "Bernardo", |
| "middle": [], |
| "last": "Stearns", |
| "suffix": "" |
| }, |
| { |
| "first": "Manon", |
| "middle": [], |
| "last": "Bouy\u00e9", |
| "suffix": "" |
| }, |
| { |
| "first": "Manel", |
| "middle": [], |
| "last": "Zarrouk", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "European Conference on Technology Enhanced Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "308--320", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nicolas Ballier, Thomas Gaillat, Andrew Simpkin, Bernardo Stearns, Manon Bouy\u00e9, and Manel Zarrouk. 2019. A supervised learning model for the automatic assessment of language levels based on learner errors. In European Conference on Technol- ogy Enhanced Learning, pages 308-320. Springer.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Toefl11: A corpus of non-native english", |
| "authors": [ |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Blanchard", |
| "suffix": "" |
| }, |
| { |
| "first": "Joel", |
| "middle": [], |
| "last": "Tetreault", |
| "suffix": "" |
| }, |
| { |
| "first": "Derrick", |
| "middle": [], |
| "last": "Higgins", |
| "suffix": "" |
| }, |
| { |
| "first": "Aoife", |
| "middle": [], |
| "last": "Cahill", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Chodorow", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "ETS Research Report Series", |
| "volume": "2013", |
| "issue": "2", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Daniel Blanchard, Joel Tetreault, Derrick Higgins, Aoife Cahill, and Martin Chodorow. 2013. Toefl11: A corpus of non-native english. ETS Research Re- port Series, 2013(2):i-15.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "A standard operating procedure for outlier removal in large-sample epidemiological transcriptomics datasets", |
| "authors": [ |
| { |
| "first": "Einar", |
| "middle": [], |
| "last": "Hege Marie B\u00f8velstad", |
| "suffix": "" |
| }, |
| { |
| "first": "Lars", |
| "middle": [ |
| "Ailo" |
| ], |
| "last": "Holsb\u00f8", |
| "suffix": "" |
| }, |
| { |
| "first": "Eiliv", |
| "middle": [], |
| "last": "Bongo", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Lund", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "BioRxiv", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hege Marie B\u00f8velstad, Einar Holsb\u00f8, Lars Ailo Bongo, and Eiliv Lund. 2017. A standard operating pro- cedure for outlier removal in large-sample epidemi- ological transcriptomics datasets. BioRxiv, page 144519.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "The MERLIN corpus: Learner language and the CEFR", |
| "authors": [ |
| { |
| "first": "Adriane", |
| "middle": [], |
| "last": "Boyd", |
| "suffix": "" |
| }, |
| { |
| "first": "Jirka", |
| "middle": [], |
| "last": "Hana", |
| "suffix": "" |
| }, |
| { |
| "first": "Lionel", |
| "middle": [], |
| "last": "Nicolas", |
| "suffix": "" |
| }, |
| { |
| "first": "Detmar", |
| "middle": [], |
| "last": "Meurers", |
| "suffix": "" |
| }, |
| { |
| "first": "Katrin", |
| "middle": [], |
| "last": "Wisniewski", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Abel", |
| "suffix": "" |
| }, |
| { |
| "first": "Karin", |
| "middle": [], |
| "last": "Sch\u00f6ne", |
| "suffix": "" |
| }, |
| { |
| "first": "Barbora", |
| "middle": [], |
| "last": "Stindlov\u00e1", |
| "suffix": "" |
| }, |
| { |
| "first": "Chiara", |
| "middle": [], |
| "last": "Vettori", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "LREC", |
| "volume": "", |
| "issue": "", |
| "pages": "1281--1288", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adriane Boyd, Jirka Hana, Lionel Nicolas, Detmar Meurers, Katrin Wisniewski, Andrea Abel, Karin Sch\u00f6ne, Barbora Stindlov\u00e1, and Chiara Vettori. 2014. The MERLIN corpus: Learner language and the CEFR. In LREC, pages 1281-1288. Reykjavik, Ice- land.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Defining and operationalising l2 complexity. Dimensions of L2 performance and proficiency: Complexity, accuracy and fluency in SLA", |
| "authors": [ |
| { |
| "first": "Bram", |
| "middle": [], |
| "last": "Bult\u00e9", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Housen", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "23--46", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bram Bult\u00e9 and Alex Housen. 2012. Defining and oper- ationalising l2 complexity. Dimensions of L2 perfor- mance and proficiency: Complexity, accuracy and fluency in SLA, pages 23-46.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Reprolang 2020: Automatic proficiency scoring of czech, english, german, italian, and spanish learner essays", |
| "authors": [ |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Caines", |
| "suffix": "" |
| }, |
| { |
| "first": "Paula", |
| "middle": [], |
| "last": "Buttery", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of The 12th Language Resources and Evaluation Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "5614--5623", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrew Caines and Paula Buttery. 2020. Reprolang 2020: Automatic proficiency scoring of czech, en- glish, german, italian, and spanish learner essays. In Proceedings of The 12th Language Resources and Evaluation Conference, pages 5614-5623.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "On the properties of neural machine translation: Encoder-decoder approaches", |
| "authors": [ |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Bart", |
| "middle": [], |
| "last": "Van Merrienboer", |
| "suffix": "" |
| }, |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "KyungHyun Cho, Bart van Merrienboer, Dzmitry Bah- danau, and Yoshua Bengio. 2014. On the properties of neural machine translation: Encoder-decoder ap- proaches. CoRR, abs/1409.1259.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "More than words: The role of multiword sequences in language learning and use", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Morten", |
| "suffix": "" |
| }, |
| { |
| "first": "Inbal", |
| "middle": [], |
| "last": "Christiansen", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Arnon", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Topics in Cognitive Science", |
| "volume": "9", |
| "issue": "3", |
| "pages": "542--551", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Morten H Christiansen and Inbal Arnon. 2017. More than words: The role of multiword sequences in lan- guage learning and use. Topics in Cognitive Science, 9(3):542-551.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "The corpus of contemporary american english (coca): 560 million words", |
| "authors": [ |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Davies", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mark Davies. 2008. The corpus of contemporary american english (coca): 560 million words, 1990- present.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Deflate compressed data format specification version 1.3. IETF RFC", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Deutsch", |
| "suffix": "" |
| } |
| ], |
| "year": 1951, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Deutsch. 1996. Deflate compressed data format specification version 1.3. IETF RFC 1951.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Learning improved feature rankings through decremental input pruning for support vector based drug activity prediction", |
| "authors": [ |
| { |
| "first": "Wladimiro", |
| "middle": [], |
| "last": "D\u00edaz-Villanueva", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Francesc", |
| "suffix": "" |
| }, |
| { |
| "first": "Vicente", |
| "middle": [], |
| "last": "Ferri", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Cerver\u00f3n", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "International Conference on Industrial, Engineering and Other Applications of Applied Intelligent Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "653--661", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wladimiro D\u00edaz-Villanueva, Francesc J Ferri, and Vi- cente Cerver\u00f3n. 2010. Learning improved feature rankings through decremental input pruning for sup- port vector based drug activity prediction. In Inter- national Conference on Industrial, Engineering and Other Applications of Applied Intelligent Systems, pages 653-661. Springer.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Development of vocabulary sophistication across genres in English children's writing", |
| "authors": [ |
| { |
| "first": "Philip", |
| "middle": [], |
| "last": "Durrant", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Brenchley", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Reading and Writing", |
| "volume": "32", |
| "issue": "8", |
| "pages": "1927--1953", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philip Durrant and Mark Brenchley. 2019. Develop- ment of vocabulary sophistication across genres in English children's writing. Reading and Writing, 32(8):1927-1953.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Common European Framework of Reference for Languages: learning, teaching, assessment", |
| "authors": [ |
| { |
| "first": "Council Of", |
| "middle": [], |
| "last": "Europe", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Council of Europe. Council for Cultural Co-operation. Education Committee. Modern Languages Division. 2018. Common European Framework of Refer- ence for Languages: learning, teaching, assessment. Cambridge University Press.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "An information-theoretic approach to language complexity: variation in naturalistic corpora", |
| "authors": [ |
| { |
| "first": "Katharina", |
| "middle": [], |
| "last": "Ehret", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Katharina Ehret. 2016. An information-theoretic ap- proach to language complexity: variation in natu- ralistic corpora. Ph.D. thesis, Universit\u00e4t.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Compressing learner language: An informationtheoretic measure of complexity in sla production data", |
| "authors": [ |
| { |
| "first": "Katharina", |
| "middle": [], |
| "last": "Ehret", |
| "suffix": "" |
| }, |
| { |
| "first": "Benedikt", |
| "middle": [], |
| "last": "Szmrecsanyi", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Second Language Research", |
| "volume": "35", |
| "issue": "1", |
| "pages": "23--45", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Katharina Ehret and Benedikt Szmrecsanyi. 2019. Compressing learner language: An information- theoretic measure of complexity in sla production data. Second Language Research, 35(1):23-45.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Beginning and intermediate l2 writer's use of n-grams: an association measures study. International Review of Applied Linguistics in Language Teaching", |
| "authors": [ |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Garner", |
| "suffix": "" |
| }, |
| { |
| "first": "Scott", |
| "middle": [], |
| "last": "Crossley", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristopher", |
| "middle": [], |
| "last": "Kyle", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "58", |
| "issue": "", |
| "pages": "51--74", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "James Garner, Scott Crossley, and Kristopher Kyle. 2020. Beginning and intermediate l2 writer's use of n-grams: an association measures study. Inter- national Review of Applied Linguistics in Language Teaching, 58(1):51-74.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Automatic linguistic annotation of large scale L2 databases: The EF-Cambridge Open Language Database", |
| "authors": [ |
| { |
| "first": "Jeroen", |
| "middle": [], |
| "last": "Geertzen", |
| "suffix": "" |
| }, |
| { |
| "first": "Theodora", |
| "middle": [], |
| "last": "Alexopoulou", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeroen Geertzen, Theodora Alexopoulou, and Anna Ko- rhonen. 2014. Automatic linguistic annotation of large scale L2 databases: The EF-Cambridge Open Language Database(EFCamDat).", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "The international corpus of learner english. handbook and cd-rom", |
| "authors": [ |
| { |
| "first": "Sylviane", |
| "middle": [], |
| "last": "Granger", |
| "suffix": "" |
| }, |
| { |
| "first": "Estelle", |
| "middle": [], |
| "last": "Dagneaux", |
| "suffix": "" |
| }, |
| { |
| "first": "Fanny", |
| "middle": [], |
| "last": "Meunier", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sylviane Granger, Estelle Dagneaux, and Fanny Meu- nier. 2002. The international corpus of learner en- glish. handbook and cd-rom.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Exploring CEFR classification for German based on rich linguistic modeling", |
| "authors": [ |
| { |
| "first": "Julia", |
| "middle": [], |
| "last": "Hancke", |
| "suffix": "" |
| }, |
| { |
| "first": "Detmar", |
| "middle": [], |
| "last": "Meurers", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Learner Corpus Research", |
| "volume": "", |
| "issue": "", |
| "pages": "54--56", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Julia Hancke and Detmar Meurers. 2013. Exploring CEFR classification for German based on rich lin- guistic modeling. Learner Corpus Research, pages 54-56.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Criterial features in L2 English: Specifying the reference levels of the Common European Framework", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [ |
| "A" |
| ], |
| "last": "Hawkins", |
| "suffix": "" |
| }, |
| { |
| "first": "Luna", |
| "middle": [], |
| "last": "Filipovi\u0107", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "", |
| "volume": "1", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John A Hawkins and Luna Filipovi\u0107. 2012. Criterial features in L2 English: Specifying the reference lev- els of the Common European Framework, volume 1. Cambridge University Press.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Learner corpora and automated scoring", |
| "authors": [ |
| { |
| "first": "Derrick", |
| "middle": [], |
| "last": "Higgins", |
| "suffix": "" |
| }, |
| { |
| "first": "Chaitanya", |
| "middle": [], |
| "last": "Ramineni", |
| "suffix": "" |
| }, |
| { |
| "first": "Klaus", |
| "middle": [], |
| "last": "Zechner", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "The Cambridge Handbook of Learner Corpus Research", |
| "volume": "", |
| "issue": "", |
| "pages": "587--604", |
| "other_ids": { |
| "DOI": [ |
| "10.1017/cbo9781139649414.026" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Derrick Higgins, Chaitanya Ramineni, and Klaus Zech- ner. 2015. Learner corpora and automated scor- ing. In Sylviane Granger, Gaetanelle Gilquin, and Fanny Meunier, editors, The Cambridge Handbook of Learner Corpus Research, pages 587-604. Cam- bridge University Press.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Estimating the prevalence and diversity of words in written language", |
| "authors": [ |
| { |
| "first": "Brendan", |
| "middle": [ |
| "T" |
| ], |
| "last": "Johns", |
| "suffix": "" |
| }, |
| { |
| "first": "Melody", |
| "middle": [], |
| "last": "Dye", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [ |
| "N" |
| ], |
| "last": "Jones", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Quarterly Journal of Experimental Psychology", |
| "volume": "73", |
| "issue": "6", |
| "pages": "841--855", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Brendan T Johns, Melody Dye, and Michael N Jones. 2020. Estimating the prevalence and diversity of words in written language. Quarterly Journal of Ex- perimental Psychology, 73(6):841-855.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Automated essay scoring: A survey of the state of the art", |
| "authors": [ |
| { |
| "first": "Zixuan", |
| "middle": [], |
| "last": "Ke", |
| "suffix": "" |
| }, |
| { |
| "first": "Vincent", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "IJCAI", |
| "volume": "", |
| "issue": "", |
| "pages": "6300--6308", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zixuan Ke and Vincent Ng. 2019. Automated essay scoring: A survey of the state of the art. In IJCAI, pages 6300-6308.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Becoming linguistically mature: Modeling english and german children's writing development across school grades", |
| "authors": [ |
| { |
| "first": "Elma", |
| "middle": [], |
| "last": "Kerz", |
| "suffix": "" |
| }, |
| { |
| "first": "Yu", |
| "middle": [], |
| "last": "Qiao", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Wiechmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcus", |
| "middle": [], |
| "last": "Str\u00f6bel", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Fifteenth Workshop on Innovative Use of NLP for Building Educational Applications", |
| "volume": "", |
| "issue": "", |
| "pages": "65--74", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Elma Kerz, Yu Qiao, Daniel Wiechmann, and Mar- cus Str\u00f6bel. 2020. Becoming linguistically mature: Modeling english and german children's writing de- velopment across school grades. In Proceedings of the Fifteenth Workshop on Innovative Use of NLP for Building Educational Applications, pages 65-74.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Word association profiles and their use for automated scoring of essays", |
| "authors": [ |
| { |
| "first": "Beata", |
| "middle": [], |
| "last": "Beigman Klebanov", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Flor", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1148--1158", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Beata Beigman Klebanov and Michael Flor. 2013. Word association profiles and their use for auto- mated scoring of essays. In Proceedings of the 51st Annual Meeting of the Association for Compu- tational Linguistics (Volume 1: Long Papers), pages 1148-1158.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Automated evaluation of writing-50 years and counting", |
| "authors": [ |
| { |
| "first": "Beata", |
| "middle": [], |
| "last": "Beigman Klebanov", |
| "suffix": "" |
| }, |
| { |
| "first": "Nitin", |
| "middle": [], |
| "last": "Madnani", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "7796--7810", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Beata Beigman Klebanov and Nitin Madnani. 2020. Automated evaluation of writing-50 years and counting. In Proceedings of the 58th Annual Meet- ing of the Association for Computational Linguistics, pages 7796-7810.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Accurate unlexicalized parsing", |
| "authors": [ |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of the 41st Annual Meeting on Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "423--430", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dan Klein and Christopher D Manning. 2003. Accu- rate unlexicalized parsing. In Proceedings of the 41st Annual Meeting on Association for Computa- tional Linguistics-Volume 1, pages 423-430. Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Variation in syntactic complexity: Introduction", |
| "authors": [ |
| { |
| "first": "Folkert", |
| "middle": [], |
| "last": "Kuiken", |
| "suffix": "" |
| }, |
| { |
| "first": "Ineke", |
| "middle": [], |
| "last": "Vedder", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Housen", |
| "suffix": "" |
| }, |
| { |
| "first": "Bastien De", |
| "middle": [], |
| "last": "Clercq", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "International Journal of Applied Linguistics", |
| "volume": "29", |
| "issue": "2", |
| "pages": "161--170", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Folkert Kuiken, Ineke Vedder, Alex Housen, and Bastien De Clercq. 2019. Variation in syntactic com- plexity: Introduction. International Journal of Ap- plied Linguistics, 29(2):161-170.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Automatically assessing lexical sophistication: Indices, tools, findings, and application", |
| "authors": [ |
| { |
| "first": "Kristopher", |
| "middle": [], |
| "last": "Kyle", |
| "suffix": "" |
| }, |
| { |
| "first": "Scott", |
| "middle": [ |
| "A" |
| ], |
| "last": "Crossley", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Tesol Quarterly", |
| "volume": "49", |
| "issue": "4", |
| "pages": "757--786", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kristopher Kyle and Scott A Crossley. 2014. Auto- matically assessing lexical sophistication: Indices, tools, findings, and application. Tesol Quarterly, 49(4):757-786.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Selection of relevant features in machine learning", |
| "authors": [ |
| { |
| "first": "Pat", |
| "middle": [], |
| "last": "Langley", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "Proceedings of the AAAI Fall symposium on relevance", |
| "volume": "", |
| "issue": "", |
| "pages": "1--5", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pat Langley. 1994. Selection of relevant features in ma- chine learning. In Proceedings of the AAAI Fall sym- posium on relevance, pages 1-5.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "The emergence of complexity, fluency, and accuracy in the oral and written production of five chinese learners of english", |
| "authors": [ |
| { |
| "first": "Diane", |
| "middle": [], |
| "last": "Larsen-Freeman", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Applied linguistics", |
| "volume": "27", |
| "issue": "", |
| "pages": "590--619", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diane Larsen-Freeman. 2006. The emergence of com- plexity, fluency, and accuracy in the oral and written production of five chinese learners of english. Ap- plied linguistics, 27(4):590-619.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Tregex and tsurgeon: tools for querying and manipulating tree data structures", |
| "authors": [ |
| { |
| "first": "Roger", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Galen", |
| "middle": [], |
| "last": "Andrew", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "LREC", |
| "volume": "", |
| "issue": "", |
| "pages": "2231--2234", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Roger Levy and Galen Andrew. 2006. Tregex and tsur- geon: tools for querying and manipulating tree data structures. In LREC, pages 2231-2234. Citeseer.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Automatic analysis of syntactic complexity in second language writing", |
| "authors": [ |
| { |
| "first": "Xiaofei", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "International Journal of Corpus Linguistics", |
| "volume": "15", |
| "issue": "4", |
| "pages": "474--496", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiaofei Lu. 2010a. Automatic analysis of syntac- tic complexity in second language writing. Inter- national Journal of Corpus Linguistics, 15(4):474- 496.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Automatic analysis of syntactic complexity in second language writing", |
| "authors": [ |
| { |
| "first": "Xiaofei", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "International journal of corpus linguistics", |
| "volume": "15", |
| "issue": "4", |
| "pages": "474--496", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiaofei Lu. 2010b. Automatic analysis of syntactic complexity in second language writing. Interna- tional journal of corpus linguistics, 15(4):474-496.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "A corpus-based evaluation of syntactic complexity measures as indices of college-level esl writers' language development", |
| "authors": [ |
| { |
| "first": "Xiaofei", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Tesol Quarterly", |
| "volume": "45", |
| "issue": "1", |
| "pages": "36--62", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiaofei Lu. 2011. A corpus-based evaluation of syntac- tic complexity measures as indices of college-level esl writers' language development. Tesol Quarterly, 45(1):36-62.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "The relationship of lexical richness to the quality of esl learners' oral narratives. The Modern Language", |
| "authors": [ |
| { |
| "first": "Xiaofei", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Journal", |
| "volume": "96", |
| "issue": "2", |
| "pages": "190--208", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiaofei Lu. 2012. The relationship of lexical richness to the quality of esl learners' oral narratives. The Modern Language Journal, 96(2):190-208.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "The stanford corenlp natural language processing toolkit", |
| "authors": [ |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "Mihai", |
| "middle": [], |
| "last": "Surdeanu", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Bauer", |
| "suffix": "" |
| }, |
| { |
| "first": "Jenny", |
| "middle": [], |
| "last": "Finkel", |
| "suffix": "" |
| }, |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Bethard", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Mcclosky", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of 52nd annual meeting of the association for computational linguistics: system demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "55--60", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christopher Manning, Mihai Surdeanu, John Bauer, Jenny Finkel, Steven Bethard, and David McClosky. 2014. The stanford corenlp natural language pro- cessing toolkit. In Proceedings of 52nd annual meet- ing of the association for computational linguistics: system demonstrations, pages 55-60.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Revisiting small batch training for deep neural networks", |
| "authors": [ |
| { |
| "first": "Dominic", |
| "middle": [], |
| "last": "Masters", |
| "suffix": "" |
| }, |
| { |
| "first": "Carlo", |
| "middle": [], |
| "last": "Luschi", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1804.07612" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dominic Masters and Carlo Luschi. 2018. Revisiting small batch training for deep neural networks. arXiv preprint arXiv:1804.07612.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Prediction risk and architecture selection for neural networks", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Moody", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "From statistics to neural networks", |
| "volume": "", |
| "issue": "", |
| "pages": "147--165", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John Moody. 1994. Prediction risk and architecture se- lection for neural networks. In From statistics to neural networks, pages 147-165. Springer.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "Syntactic complexity measures and their relationship to l2 proficiency: A research synthesis of college-level l2 writing", |
| "authors": [ |
| { |
| "first": "Lourdes", |
| "middle": [], |
| "last": "Ortega", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Applied linguistics", |
| "volume": "24", |
| "issue": "4", |
| "pages": "492--518", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lourdes Ortega. 2003. Syntactic complexity measures and their relationship to l2 proficiency: A research synthesis of college-level l2 writing. Applied lin- guistics, 24(4):492-518.", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "Interlanguage complexity. Linguistic complexity: Second language acquisition, indigenization", |
| "authors": [ |
| { |
| "first": "Lourdes", |
| "middle": [], |
| "last": "Ortega", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "contact", |
| "volume": "13", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lourdes Ortega. 2012. Interlanguage complexity. Lin- guistic complexity: Second language acquisition, in- digenization, contact, 13:127.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "Predicting proficiency levels in learner writings by transferring a linguistic complexity model from expert-written coursebooks", |
| "authors": [ |
| { |
| "first": "Ildik\u00f3", |
| "middle": [], |
| "last": "Pil\u00e1n", |
| "suffix": "" |
| }, |
| { |
| "first": "Elena", |
| "middle": [], |
| "last": "Volodina", |
| "suffix": "" |
| }, |
| { |
| "first": "Torsten", |
| "middle": [], |
| "last": "Zesch", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of COL-ING 2016, the 26th International Conference on Computational Linguistics: Technical Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "2101--2111", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ildik\u00f3 Pil\u00e1n, Elena Volodina, and Torsten Zesch. 2016. Predicting proficiency levels in learner writings by transferring a linguistic complexity model from expert-written coursebooks. In Proceedings of COL- ING 2016, the 26th International Conference on Computational Linguistics: Technical Papers, pages 2101-2111.", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "Pytorch: Tensors and dynamic neural networks in Python with strong GPU acceleration", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Pytorch", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pytorch. 2019. Pytorch: Tensors and dynamic neural networks in Python with strong GPU acceleration. https://github.com/pytorch/pytorch.", |
| "links": null |
| }, |
| "BIBREF45": { |
| "ref_id": "b45", |
| "title": "Compilation of the yonsei english learner corpus (yelc) 2011 and its use for understanding current usage of english by korean pre-university students", |
| "authors": [ |
| { |
| "first": "Seok-Chae", |
| "middle": [], |
| "last": "Rhee", |
| "suffix": "" |
| }, |
| { |
| "first": "Chae", |
| "middle": [ |
| "Kwan" |
| ], |
| "last": "Jung", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "The Journal of the Korea Contents Association", |
| "volume": "14", |
| "issue": "11", |
| "pages": "1019--1029", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Seok-Chae Rhee and Chae Kwan Jung. 2014. Com- pilation of the yonsei english learner corpus (yelc) 2011 and its use for understanding current usage of english by korean pre-university students. The Jour- nal of the Korea Contents Association, 14(11):1019- 1029.", |
| "links": null |
| }, |
| "BIBREF46": { |
| "ref_id": "b46", |
| "title": "Multi-or single-word units? the role of collocation use in comprehensible and contextually appropriate second language speech", |
| "authors": [ |
| { |
| "first": "Kazuya", |
| "middle": [], |
| "last": "Saito", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "70", |
| "issue": "", |
| "pages": "548--588", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kazuya Saito. 2020. Multi-or single-word units? the role of collocation use in comprehensible and con- textually appropriate second language speech. Lan- guage Learning, 70(2):548-588.", |
| "links": null |
| }, |
| "BIBREF47": { |
| "ref_id": "b47", |
| "title": "Formulaic sequences: Do they exist and do they matter? The mental lexicon", |
| "authors": [ |
| { |
| "first": "Cyrus", |
| "middle": [], |
| "last": "Shaoul", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Westbury", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "", |
| "volume": "6", |
| "issue": "", |
| "pages": "171--196", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Cyrus Shaoul and Chris Westbury. 2011. Formulaic sequences: Do they exist and do they matter? The mental lexicon, 6(1):171-196.", |
| "links": null |
| }, |
| "BIBREF48": { |
| "ref_id": "b48", |
| "title": "Tracking complexity of l2 academic texts: A sliding-window approach", |
| "authors": [ |
| { |
| "first": "Marcus", |
| "middle": [], |
| "last": "Str\u00f6bel", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marcus Str\u00f6bel. 2014. Tracking complexity of l2 aca- demic texts: A sliding-window approach. Master thesis. RWTH Aachen University.", |
| "links": null |
| }, |
| "BIBREF49": { |
| "ref_id": "b49", |
| "title": "A neural approach to automated essay scoring", |
| "authors": [ |
| { |
| "first": "Kaveh", |
| "middle": [], |
| "last": "Taghipour", |
| "suffix": "" |
| }, |
| { |
| "first": "Hwee Tou", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 conference on empirical methods in natural language processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1882--1891", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kaveh Taghipour and Hwee Tou Ng. 2016. A neural approach to automated essay scoring. In Proceed- ings of the 2016 conference on empirical methods in natural language processing, pages 1882-1891.", |
| "links": null |
| }, |
| "BIBREF50": { |
| "ref_id": "b50", |
| "title": "The psychological meaning of words: LIWC and computerized text analysis methods", |
| "authors": [ |
| { |
| "first": "Yla", |
| "middle": [ |
| "R" |
| ], |
| "last": "Tausczik", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [ |
| "W" |
| ], |
| "last": "Pennebaker", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Journal of Language and Social Psychology", |
| "volume": "29", |
| "issue": "1", |
| "pages": "24--54", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yla R Tausczik and James W Pennebaker. 2010. The psychological meaning of words: LIWC and com- puterized text analysis methods. Journal of Lan- guage and Social Psychology, 29(1):24-54.", |
| "links": null |
| }, |
| "BIBREF51": { |
| "ref_id": "b51", |
| "title": "Selecting neural network architectures via the prediction risk: Application to corporate bond rating prediction", |
| "authors": [ |
| { |
| "first": "Joachim", |
| "middle": [], |
| "last": "Utans", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Moody", |
| "suffix": "" |
| } |
| ], |
| "year": 1991, |
| "venue": "Proceedings First International Conference on Artificial Intelligence Applications on Wall Street", |
| "volume": "", |
| "issue": "", |
| "pages": "35--41", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joachim Utans and John Moody. 1991. Selecting neu- ral network architectures via the prediction risk: Ap- plication to corporate bond rating prediction. In Pro- ceedings First International Conference on Artificial Intelligence Applications on Wall Street, pages 35- 41. IEEE.", |
| "links": null |
| }, |
| "BIBREF52": { |
| "ref_id": "b52", |
| "title": "Automated assessment of nonnative learner essays: Investigating the role of linguistic features", |
| "authors": [ |
| { |
| "first": "Sowmya", |
| "middle": [], |
| "last": "Vajjala", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "International Journal of Artificial Intelligence in Education", |
| "volume": "28", |
| "issue": "1", |
| "pages": "79--105", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sowmya Vajjala. 2018. Automated assessment of non- native learner essays: Investigating the role of lin- guistic features. International Journal of Artificial Intelligence in Education, 28(1):79-105.", |
| "links": null |
| }, |
| "BIBREF53": { |
| "ref_id": "b53", |
| "title": "Automatic CEFR level prediction for Estonian learner text", |
| "authors": [ |
| { |
| "first": "Sowmya", |
| "middle": [], |
| "last": "Vajjala", |
| "suffix": "" |
| }, |
| { |
| "first": "Kaidi", |
| "middle": [], |
| "last": "L\u00f5o", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the third workshop on NLP for computer-assisted language learning", |
| "volume": "", |
| "issue": "", |
| "pages": "113--127", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sowmya Vajjala and Kaidi L\u00f5o. 2014. Automatic CEFR level prediction for Estonian learner text. In Proceedings of the third workshop on NLP for computer-assisted language learning, pages 113- 127, Uppsala, Sweden. LiU Electronic Press.", |
| "links": null |
| }, |
| "BIBREF54": { |
| "ref_id": "b54", |
| "title": "Experiments with universal CEFR classification", |
| "authors": [ |
| { |
| "first": "Sowmya", |
| "middle": [], |
| "last": "Vajjala", |
| "suffix": "" |
| }, |
| { |
| "first": "Taraka", |
| "middle": [], |
| "last": "Rama", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Thirteenth Workshop on Innovative Use of NLP for Building Educational Applications", |
| "volume": "", |
| "issue": "", |
| "pages": "147--153", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W18-0515" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sowmya Vajjala and Taraka Rama. 2018. Experiments with universal CEFR classification. In Proceedings of the Thirteenth Workshop on Innovative Use of NLP for Building Educational Applications, pages 147-153, New Orleans, Louisiana. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF55": { |
| "ref_id": "b55", |
| "title": "Classification of Swedish learner essays by CEFR levels. CALL communities and culture-short papers from EUROCALL", |
| "authors": [ |
| { |
| "first": "Elena", |
| "middle": [], |
| "last": "Volodina", |
| "suffix": "" |
| }, |
| { |
| "first": "Ildik\u00f3", |
| "middle": [], |
| "last": "Pil\u00e1n", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Alfter", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "456--461", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Elena Volodina, Ildik\u00f3 Pil\u00e1n, and David Alfter. 2016. Classification of Swedish learner essays by CEFR levels. CALL communities and culture-short papers from EUROCALL, 2016:456-461.", |
| "links": null |
| }, |
| "BIBREF56": { |
| "ref_id": "b56", |
| "title": "Modeling the readability of German targeting adults and children: An empirically broad analysis and its crosscorpus validation", |
| "authors": [ |
| { |
| "first": "Zarah", |
| "middle": [], |
| "last": "Wei\u00df", |
| "suffix": "" |
| }, |
| { |
| "first": "Detmar", |
| "middle": [], |
| "last": "Meurers", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "303--317", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zarah Wei\u00df and Detmar Meurers. 2018. Modeling the readability of German targeting adults and chil- dren: An empirically broad analysis and its cross- corpus validation. In Proceedings of the 27th Inter- national Conference on Computational Linguistics, pages 303-317, Santa Fe, New Mexico, USA. Asso- ciation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF57": { |
| "ref_id": "b57", |
| "title": "Empirical learner language and the levels of the common european framework of reference", |
| "authors": [ |
| { |
| "first": "Katrin", |
| "middle": [], |
| "last": "Wisniewski", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Language Learning", |
| "volume": "67", |
| "issue": "S1", |
| "pages": "232--253", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Katrin Wisniewski. 2017. Empirical learner language and the levels of the common european framework of reference. Language Learning, 67(S1):232-253.", |
| "links": null |
| }, |
| "BIBREF58": { |
| "ref_id": "b58", |
| "title": "Second language development in writing: Measures of fluency, accuracy, & complexity. 17", |
| "authors": [ |
| { |
| "first": "Kate", |
| "middle": [], |
| "last": "Wolfe-Quintero", |
| "suffix": "" |
| }, |
| { |
| "first": "Shunji", |
| "middle": [], |
| "last": "Inagaki", |
| "suffix": "" |
| }, |
| { |
| "first": "Hae-Young", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kate Wolfe-Quintero, Shunji Inagaki, and Hae-Young Kim. 1998. Second language development in writ- ing: Measures of fluency, accuracy, & complexity. 17. University of Hawaii Press.", |
| "links": null |
| }, |
| "BIBREF59": { |
| "ref_id": "b59", |
| "title": "Modeling coherence in esol learner texts", |
| "authors": [ |
| { |
| "first": "Helen", |
| "middle": [], |
| "last": "Yannakoudakis", |
| "suffix": "" |
| }, |
| { |
| "first": "Ted", |
| "middle": [], |
| "last": "Briscoe", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the Seventh Workshop on Building Educational Applications Using NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "33--43", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Helen Yannakoudakis and Ted Briscoe. 2012. Model- ing coherence in esol learner texts. In Proceedings of the Seventh Workshop on Building Educational Applications Using NLP, pages 33-43.", |
| "links": null |
| }, |
| "BIBREF60": { |
| "ref_id": "b60", |
| "title": "A new dataset and method for automatically grading ESOL texts", |
| "authors": [ |
| { |
| "first": "Helen", |
| "middle": [], |
| "last": "Yannakoudakis", |
| "suffix": "" |
| }, |
| { |
| "first": "Ted", |
| "middle": [], |
| "last": "Briscoe", |
| "suffix": "" |
| }, |
| { |
| "first": "Ben", |
| "middle": [], |
| "last": "Medlock", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "180--189", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Helen Yannakoudakis, Ted Briscoe, and Ben Medlock. 2011. A new dataset and method for automatically grading ESOL texts. In Proceedings of the 49th An- nual Meeting of the Association for Computational Linguistics: Human Language Technologies, pages 180-189, Portland, Oregon, USA. Association for Computational Linguistics.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF1": { |
| "text": "Roll-out of the contour-based RNN model based on complexity contours", |
| "num": null, |
| "type_str": "figure", |
| "uris": null |
| }, |
| "TABREF0": { |
| "type_str": "table", |
| "text": "Distribution of texts by CEFR proficiency level and text length statistics (in words)", |
| "html": null, |
| "content": "<table><tr><td colspan=\"3\">CEFR N texts Mean length</td><td>SD</td></tr><tr><td>A1</td><td>8313</td><td>132.24</td><td>42.62</td></tr><tr><td>A2</td><td>19587</td><td>119.70</td><td>27.22</td></tr><tr><td>B1</td><td>61396</td><td>118.86</td><td>24.94</td></tr><tr><td>B2</td><td>48535</td><td>142.89</td><td>35.03</td></tr><tr><td>C1</td><td>12831</td><td>174.08</td><td>37.36</td></tr><tr><td>C2</td><td>1652</td><td>180.79</td><td>63.49</td></tr></table>", |
| "num": null |
| }, |
| "TABREF1": { |
| "type_str": "table", |
| "text": "Feature ablation algorithm Input: N training instances with feature set F = {f 1 , . . . , f D } Input: m features to remove at each step Result: list containing the feature importance rank order", |
| "html": null, |
| "content": "<table><tr><td colspan=\"2\">1 begin</td><td/></tr><tr><td>2</td><td>t</td><td>0</td></tr><tr><td>3</td><td>list</td><td/><td>[]</td></tr><tr><td>4 5</td><td colspan=\"3\">while |F | > 0 do Train a classifier with |F | input features;</td></tr><tr><td>6 7</td><td/><td colspan=\"2\">Compute S i,t , i 2 F ; Find f i 1 , . . . , f im , where</td></tr><tr><td/><td/><td colspan=\"2\">S i 1 ,t , . . . , S im,t are m largest</td></tr><tr><td/><td/><td colspan=\"2\">among all S i,t (i 2 F ) in descending order;</td></tr><tr><td>8</td><td/><td>list</td><td>list.append([f i 1 , . . . , f im ]);</td></tr><tr><td>9 10</td><td/><td>F t</td><td>F {f i 1 , . . . , f im }; t + 1;</td></tr><tr><td>11</td><td colspan=\"3\">return list</td></tr><tr><td colspan=\"4\">for the C1 level and 0.42 for the C2 level. The</td></tr><tr><td colspan=\"4\">confusion matrix of the contour-based RNN model</td></tr><tr><td colspan=\"4\">is presented in</td></tr></table>", |
| "num": null |
| }, |
| "TABREF2": { |
| "type_str": "table", |
| "text": "Performance statistics of RNN classifiers (left) and results of feature ablation (right). Values in '()' indicate standard deviations. 'Base Mod Acc'= Accuracy of baseline model; 'Acc after Del' = Accuracy of model after deletion of feature (only top-20 features are shown).", |
| "html": null, |
| "content": "<table><tr><td colspan=\"3\">Performance Means-based Contour-based</td><td colspan=\"2\">Feature importance</td><td/></tr><tr><td>statistics</td><td>RNN Model</td><td>RNN Model</td><td>CM</td><td colspan=\"2\">Base Mod Acc Acc after Del</td></tr><tr><td colspan=\"2\">Accuracy train 0.938 (0.012)</td><td>0.976 (0.012)</td><td>Bigram fic</td><td>0.754 (0.004)</td><td>0.684 (0.005)</td></tr><tr><td>Accuracy test</td><td>0.661 (0.004)</td><td>0.754 (0.004)</td><td>Bigram acad</td><td>0.748 (0.003)</td><td>0.686 (0.005)</td></tr><tr><td>Precision A1</td><td>0.677 (0.021)</td><td>0.784 (0.016)</td><td>ANC</td><td>0.744 (0.006)</td><td>0.691 (0.006)</td></tr><tr><td>Precision A2</td><td>0.640 (0.013)</td><td>0.745 (0.007)</td><td>MLWs</td><td>0.732 (0.003)</td><td>0.689 (0.002)</td></tr><tr><td>Precision B1</td><td>0.710 (0.005)</td><td>0.795 (0.005)</td><td>MLWc</td><td>0.728 (0.004)</td><td>0.675 (0.005)</td></tr><tr><td>Precision B2</td><td>0.657 (0.007)</td><td>0.739 (0.004)</td><td>Bigram spok</td><td>0.720 (0.002)</td><td>0.664 (0.004)</td></tr><tr><td>Precision C1</td><td>0.479 (0.008)</td><td>0.632 (0.014)</td><td colspan=\"2\">Unigram acad 0.712 (0.005)</td><td>0.659 (0.004)</td></tr><tr><td>Precision C2</td><td>0.436 (0.050)</td><td>0.505 (0.043)</td><td>Trigram fic</td><td>0.712 (0.004)</td><td>0.665 (0.005)</td></tr><tr><td>Recall A1</td><td>0.677 (0.021)</td><td>0.784 (0.016)</td><td>BNC</td><td>0.706 (0.004)</td><td>0.669 (0.004)</td></tr><tr><td>Recall A2</td><td>0.640 (0.013)</td><td>0.745 (0.007)</td><td>Bigram news</td><td>0.695 (0.004)</td><td>0.661 (0.004)</td></tr><tr><td>Recall B1</td><td>0.710 (0.005)</td><td>0.795 (0.005)</td><td>Unigram fic</td><td>0.691 (0.004)</td><td>0.660 (0.006)</td></tr><tr><td>Recall B2</td><td>0.657 (0.007)</td><td>0.739 (0.004)</td><td>NGSL</td><td>0.687 (0.005)</td><td>0.655 (0.005)</td></tr><tr><td>Recall C1</td><td>0.479 (0.008)</td><td>0.632 (0.014)</td><td>LD</td><td>0.681 (0.004)</td><td>0.646 (0.003)</td></tr><tr><td>Recall C2</td><td>0.436 (0.050)</td><td>0.505 (0.043)</td><td colspan=\"2\">Unigram spok 0.667 (0.004)</td><td>0.631 (0.006)</td></tr><tr><td>F1 A1</td><td>0.634 (0.010)</td><td>0.744 (0.010)</td><td colspan=\"2\">Trigram news 0.670 (0.005)</td><td>0.634 (0.006)</td></tr><tr><td>F1 A2</td><td>0.626 (0.009)</td><td>0.725 (0.007)</td><td colspan=\"2\">Unigram mag 0.666 (0.005)</td><td>0.634 (0.006)</td></tr><tr><td>F1 B1</td><td>0.713 (0.004)</td><td>0.803 (0.002)</td><td colspan=\"2\">Unigram news 0.667 (0.004)</td><td>0.604 (0.005)</td></tr><tr><td>F1 B2</td><td>0.671 (0.006)</td><td>0.751 (0.003)</td><td>Bigram mag</td><td>0.661 (0.004)</td><td>0.613 (0.005)</td></tr><tr><td>F1 C1</td><td>0.467 (0.007)</td><td>0.614 (0.008)</td><td>KolDef</td><td>0.655 (0.003)</td><td>0.626 (0.004)</td></tr><tr><td>F1 C2</td><td>0.374 (0.044)</td><td>0.419 (0.038)</td><td>KolDefMor</td><td>0.653 (0.003)</td><td>0.622 (0.005)</td></tr></table>", |
| "num": null |
| } |
| } |
| } |
| } |