| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T13:47:40.300233Z" |
| }, |
| "title": "A Hybrid System for NLPTEA-2020 CGED Shared Task", |
| "authors": [ |
| { |
| "first": "Meiyuan", |
| "middle": [], |
| "last": "Fang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "NetEase Youdao Information Technology (Beijing) Co., LTD", |
| "location": {} |
| }, |
| "email": "fangmeiyuan@youdao.com" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Fu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "NetEase Youdao Information Technology (Beijing) Co., LTD", |
| "location": {} |
| }, |
| "email": "fukai@youdao.com" |
| }, |
| { |
| "first": "Jiping", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "NetEase Youdao Information Technology (Beijing) Co., LTD", |
| "location": {} |
| }, |
| "email": "wangjp@youdao.com" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "NetEase Youdao Information Technology (Beijing) Co., LTD", |
| "location": {} |
| }, |
| "email": "liuyang@youdao.com" |
| }, |
| { |
| "first": "Jin", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "NetEase Youdao Information Technology (Beijing) Co., LTD", |
| "location": {} |
| }, |
| "email": "huangjin@youdao.com" |
| }, |
| { |
| "first": "Yitao", |
| "middle": [], |
| "last": "Duan", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "NetEase Youdao Information Technology (Beijing) Co., LTD", |
| "location": {} |
| }, |
| "email": "duan@youdao.com" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "This paper introduces our system at NLPTEA2020 shared task for CGED, which is able to detect, locate, identify and correct grammatical errors in Chinese writings. The system consists of three components: GED, GEC, and post processing. GED is an ensemble of multiple BERT-based sequence labeling models for handling GED tasks. GEC performs error correction. We exploit a collection of heterogenous models, including Seq2Seq, GECToR and a candidate generation module to obtain correction candidates. Finally in the post processing stage, results from GED and GEC are fused to form the final outputs. We tune our models to lean towards optimizing precision, which we believe is more crucial in practice. As a result, among the six tracks in the shared task, our system performs well in the correction tracks: measured in F1 score, we rank first, with the highest precision, in the TOP3 correction track and third in the TOP1 correction track, also with the highest precision. Ours are among the top 4 to 6 in other tracks, except for FPR where we rank 12. And our system achieves the highest precisions among the top 10 submissions at IDENTIFICATION and POSITION tracks.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "This paper introduces our system at NLPTEA2020 shared task for CGED, which is able to detect, locate, identify and correct grammatical errors in Chinese writings. The system consists of three components: GED, GEC, and post processing. GED is an ensemble of multiple BERT-based sequence labeling models for handling GED tasks. GEC performs error correction. We exploit a collection of heterogenous models, including Seq2Seq, GECToR and a candidate generation module to obtain correction candidates. Finally in the post processing stage, results from GED and GEC are fused to form the final outputs. We tune our models to lean towards optimizing precision, which we believe is more crucial in practice. As a result, among the six tracks in the shared task, our system performs well in the correction tracks: measured in F1 score, we rank first, with the highest precision, in the TOP3 correction track and third in the TOP1 correction track, also with the highest precision. Ours are among the top 4 to 6 in other tracks, except for FPR where we rank 12. And our system achieves the highest precisions among the top 10 submissions at IDENTIFICATION and POSITION tracks.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "With the rapid growth of online education platforms and the advance of natural language processing (NLP) techniques, recent years have seen an increased interest in automatic Grammatical Error Diagnosis (GED) and Grammatical Error Correction (GEC). Shared tasks such as CoNLL-2013 , CoNLL-2014 and BEA-2019 (Ng et al., 2013 Bryant et al., 2019) were held to correct grammatical errors in essays written by learners of * Equal contribution.", |
| "cite_spans": [ |
| { |
| "start": 270, |
| "end": 280, |
| "text": "CoNLL-2013", |
| "ref_id": null |
| }, |
| { |
| "start": 281, |
| "end": 293, |
| "text": ", CoNLL-2014", |
| "ref_id": null |
| }, |
| { |
| "start": 294, |
| "end": 306, |
| "text": "and BEA-2019", |
| "ref_id": null |
| }, |
| { |
| "start": 307, |
| "end": 323, |
| "text": "(Ng et al., 2013", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 324, |
| "end": 344, |
| "text": "Bryant et al., 2019)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "English as a Foreign Language (EFL). State-of-theart GEC systems for EFL learners have achieved impressive F 0.5 scores of 66.5 on CoNLL-2014 (test) and 73.7 on BEA-2019 (test).", |
| "cite_spans": [ |
| { |
| "start": 131, |
| "end": 148, |
| "text": "CoNLL-2014 (test)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Despite the great success of English GEC systems, Chinese Grammatical Error Detection (CGED) and Correction (CGEC) applications yet remain relatively unexplored. Chinese, on the other hand, is quite different from western languages such as English: There are more than 3,000 commonly used Chinese characters, while English has only 26 in total; Chinese uses tones to indicate various meanings, while English uses them to express emotions; Chinese emphasizes the meaning of expressions, usually resulting in short sentences without complex structure often seen in English. Due to the large number of complex characters and flexible sentence structures, Chinese is considered one of the most difficult languages in the world to learn.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Under this circumstance, the workshop on Natural Language Processing Techniques for Educational Applications (NLP-TEA) has been organizing shared tasks for CGED (Yu et al., 2014; Lee et al., 2015 Lee et al., , 2016 Rao et al., 2017 Rao et al., , 2018 to help learners of Chinese as a Foreign Language (CFL) since 2014. The shared tasks provide common test conditions for researchers from both industry and academia. We believe they are very beneficial to advancing CGED technology.", |
| "cite_spans": [ |
| { |
| "start": 161, |
| "end": 178, |
| "text": "(Yu et al., 2014;", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 179, |
| "end": 195, |
| "text": "Lee et al., 2015", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 196, |
| "end": 214, |
| "text": "Lee et al., , 2016", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 215, |
| "end": 231, |
| "text": "Rao et al., 2017", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 232, |
| "end": 250, |
| "text": "Rao et al., , 2018", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "This paper introduces our work on this year's CGED shared task. The task requires both error detection and correction, and we use a hybrid system to handle both. It uses as building blocks models designed for various NLP tasks, including BERTbased sequence labeling models, Seq2Seq, and GECToR. We tune our models to lean towards optimizing precision, which we believe is more crucial in practice. The performance is further improved by using synthetic data generated for individual tasks. Our system performs well in the correction tracks: measured in F1 score, we rank first, with the highest precision, in the TOP3 correction track and third in the TOP1 correction track, also with the highest precision. Ours are among the top 4 to 6 in other tracks, except for FPR where we rank 12. And our system achieves the highest precisions among the top 10 submissions at IDENTIFICA-TION and POSITION tracks.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The rest of this paper is organized as follows: A brief description of the CGED shared task is given in Section 2, followed by an overview of prior work in Section 3. Section 4 introduces our system in detail, and Section 5 demonstrates the experimental results. Finally, Section 6 concludes this paper.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Generally, the CGED shared task classifies grammatical errors found in Chinese writings into four different classes, i.e., redundant words (R), missing words (M), word selection errors (S), word ordering errors (W). Table 1 gives some examples of the errors, which are sampled from CGED 2020 training data. It should be noted that various error types may occur more than once in one sentence.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 216, |
| "end": 223, |
| "text": "Table 1", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Task Description", |
| "sec_num": "2" |
| }, |
| { |
| "text": "System performance is measured at the following levels:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task Description", |
| "sec_num": "2" |
| }, |
| { |
| "text": "\u2022 Detection-level. At this level, developed systems are required to distinguish whether a sentence contains the above-mentioned errors.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task Description", |
| "sec_num": "2" |
| }, |
| { |
| "text": "\u2022 Identification-level. At this level, developed systems need to identify the exact error types embedded in input sentences.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task Description", |
| "sec_num": "2" |
| }, |
| { |
| "text": "\u2022 Position-level. At this level, in addition to the error types, developed systems are asked to provide the positional information, indicating where the specific error occurs. For example, triples (5, 5, R) and (2, 3, W) are expected for S and W errors shown in Table 1 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 262, |
| "end": 269, |
| "text": "Table 1", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Task Description", |
| "sec_num": "2" |
| }, |
| { |
| "text": "\u2022 Correction-level. At this level, developed systems are required to provide up to 3 potential correction candidates for S or M errors.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task Description", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Grammatical Error Diagnosis. GED tasks are usually treated as a kind of sequential labeling problem. The common solution to this problem is utilizing the Long Short-Term Memory (LSTM) -Conditional Random Fields (CRF) model Liao et al., 2017; Fu et al., 2018b; Li et al., 2018) . Performance of these approaches are usually highly dependent on the handcrafted features fed into the LSTM layer. Fu et al., 2018b) .", |
| "cite_spans": [ |
| { |
| "start": 223, |
| "end": 241, |
| "text": "Liao et al., 2017;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 242, |
| "end": 259, |
| "text": "Fu et al., 2018b;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 260, |
| "end": 276, |
| "text": "Li et al., 2018)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 393, |
| "end": 410, |
| "text": "Fu et al., 2018b)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Grammatical Error Correction. Unlike the GED tasks, GEC tasks has been mostly treated as the machine translation problem. To the best of our knowledge, the multi-layer convolutional neural network accompanied by a large language model (Chollampatt and Ng, 2018) is considered as the first Neural Machine Translation (NMT)-like approach to handle GEC tasks in English. Then Ge et al. (2018) and ; Fu et al. (2018b) proposed to use recurrent neural networks, while recent work Grundkiewicz et al., 2019; Lichtarge et al., 2019; Fu et al., 2018a) made use of the Transformer (Vaswani et al., 2017) . Specially, GECToR (Omelianchuk et al., 2020) , which considered the English GEC task as a sequential labeling problem, has obtained competitive results to previous GEC systems.", |
| "cite_spans": [ |
| { |
| "start": 235, |
| "end": 261, |
| "text": "(Chollampatt and Ng, 2018)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 373, |
| "end": 389, |
| "text": "Ge et al. (2018)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 396, |
| "end": 413, |
| "text": "Fu et al. (2018b)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 475, |
| "end": 501, |
| "text": "Grundkiewicz et al., 2019;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 502, |
| "end": 525, |
| "text": "Lichtarge et al., 2019;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 526, |
| "end": 543, |
| "text": "Fu et al., 2018a)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 572, |
| "end": 594, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 615, |
| "end": 641, |
| "text": "(Omelianchuk et al., 2020)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The overall architecture of the developed system is depicted in Fig. 1 . The proposed system can be functionally divided into three parts: GED, GEC, and post-processing. The GED framework is responsible for error diagnosis at detection, identification and position levels, while the GEC framework provides possible candidates for detected S and M errors. Finally, the post-processing module takes results from the GED and GEC frameworks and fuse them into the final form of the system outputs.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 64, |
| "end": 70, |
| "text": "Fig. 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Methodology", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Pre-training on synthetic data is crucial for the present GEC and GED tasks since the parallel training data are still extremely scarce. It is found that the proposed basic GED models, Seq2Seq GEC models and GECToR models also benefit from synthetic data. Following previous work on English GEC tasks (Zhao et al., 2019; 2019; Xu et al., 2019) , the synthetic data generation process in this work operates on two different levels, i.e., word-level and character-level.", |
| "cite_spans": [ |
| { |
| "start": 301, |
| "end": 320, |
| "text": "(Zhao et al., 2019;", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 327, |
| "end": 343, |
| "text": "Xu et al., 2019)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Synthetic Data Generation.", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Word-level. At this level, error-free sentences are firstly segmented into words using the selfdeveloped tokenizer. Then the following wordlevel errors are randomly added to the error-free sentences.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Synthetic Data Generation.", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u2022 Transposition: change the positions of words,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Synthetic Data Generation.", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "where new positions are obtained by adding rounded bias to the original position values. The bias is sampled from a normal distribution with mean 0.0 and standard deviation 0.5.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Synthetic Data Generation.", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u2022 Deletion: delete a word.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Synthetic Data Generation.", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u2022 Insertion: add a word.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Synthetic Data Generation.", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u2022 Substitution: replace the current word with a random word in Chinese dictionary with a probability of 50%; replace the word with one of the synonyms generated by Chinese Synonyms toolkit 1 with a probability of 40%; replace the word with a word from its confusion set 2 with a probability of 10%.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Synthetic Data Generation.", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "The error probabilities of deletion and insertion are sampled from a normal distribution with mean 0.015 and standard deviation 0.2, while the error probability of substitution is sampled from a normal distribution with mean 0.075 and standard deviation 0.2.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Synthetic Data Generation.", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Character-level. On top of the word-level errors, we also add the following character-level errors to 20% of the words, simulating spelling errors that occur in the real-world.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Synthetic Data Generation.", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u2022 Transposition: flip two consecutive characters existing in the current word with a probability of 10%.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Synthetic Data Generation.", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u2022 Deletion: delete a character in the word with a probability of 10%.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Synthetic Data Generation.", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u2022 Insertion: add a random Chinese character to the word with a probability of 10%.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Synthetic Data Generation.", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u2022 Substitution: substitute a character in the word with a probability of 30%, among which 70% of the characters are replaced by characters from their confusion sets 3 , and the other 30%", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Synthetic Data Generation.", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "are replaced by random characters sampled from Chinese dictionary.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Synthetic Data Generation.", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Basic GED Models. Recently, masked language models such as Bidirectional Encoder Representations from Transformers (BERT, Devlin et al., 2018) , XLNet , and Generative Pre-Training 3 (GPT-3, Brown et al., 2020) have achieved superior performance on down-stream Natural Language Processing (NLP) tasks including question answering, language inference, sentence classification, etc.", |
| "cite_spans": [ |
| { |
| "start": 122, |
| "end": 142, |
| "text": "Devlin et al., 2018)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 191, |
| "end": 210, |
| "text": "Brown et al., 2020)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Grammatical Error Diagnosis", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "To benefit from those efforts, we propose to use the BERT based sequential labeling model as our basic GED model rather than using the LSTM-CRF model. In general, BERT stacks 12 (BERT BASE ) or 24 (BERT LARGE ) identical Transformer blocks, which either takes a single sentence or a pair of sentences as input and outputs a hidden vector for each input token as well as a special [CLS] token for the whole input sentence (pair). Here, we denote the input sequence of Chinese characters as X = (x 1 , x 2 , ..., x n ), the final hidden vector generated by BERT as H = (h 1 , h 2 , ..., h n ), and the output BIO tags as Y = (y 1 , y 2 , ..., y n ). For better comprehension, we give some examples of BIO tags in Table 2 . Then for an input token x i and a specific BIO tag t, the conditional probability of x i being labeled as t is derived using:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 711, |
| "end": 718, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Grammatical Error Diagnosis", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "P (y i = t|X) = sof tmax (W h i + b) .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Grammatical Error Diagnosis", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "(1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Grammatical Error Diagnosis", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Here, X denotes the input sequence, h i is the final hidden state of BERT, W and b are model parameters. The tag with the largest conditional probability will be chosen as the final output corresponding to the input token x i .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Grammatical Error Diagnosis", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Distribution Ensemble. Top results are usually achieved by ensemble techniques (Zheng et al., 2016; Fu et al., 2018b) , and this work also benefits from model ensemble approaches. Specifically, we assume that there are M different basic GED models {m 1 , m 2 , ..., m M }. Then for each input", |
| "cite_spans": [ |
| { |
| "start": 79, |
| "end": 99, |
| "text": "(Zheng et al., 2016;", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 100, |
| "end": 117, |
| "text": "Fu et al., 2018b)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Grammatical Error Diagnosis", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "sequence X = (x 1 , x 2 , ..., x n ), we have M output sequences {Y 1 , Y 2 , ..., Y M }.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Grammatical Error Diagnosis", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The distribution ensemble based on M different models can be written by:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Grammatical Error Diagnosis", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P (y|X) = 1 M M k=1 P k (y|X; \u03b8 k ) .", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Grammatical Error Diagnosis", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Here, P (y|X) denotes the conditional probability of final prediction, \u03b8 k indicates the trainable model parameters of kth model (m k ), and P k (y|X; \u03b8 k ) is the conditional probability generated by model m k .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Grammatical Error Diagnosis", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Voting Mechanisms. Voting mechanisms are proposed for further improvement on overall performance, especially for model precisions. In this work, we explore the following two different voting mechanisms:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Grammatical Error Diagnosis", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "\u2022 Majority voting. In this mechanism, each output of the ensemble model is assigned the same weight, and the system selects the tag with the largest weight as the final output.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Grammatical Error Diagnosis", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "\u2022 Using F1-Score as weight. In this mechanism, we first evaluate the ensemble models using the development set and obtain corresponding F1 scores. Then the overall F1 scores serve as the weight for the ensemble models during the inference step.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Grammatical Error Diagnosis", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "As shown in Figure 1 , the GEC framework consists of Seq2Seq GEC models, GECToR models, and a candidates generation module.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 12, |
| "end": 20, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Grammatical Error Correction", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Seq2Seq GEC Models. This work explores two kinds of Seq2Seq GEC models: one is the regular Transformer model (Vaswani et al., 2017) , and the other is the copy augmented Transformer model (Zhao et al., 2019) . The attention-based Transformer is the most widely used sequence transduction model in Natural Language Processing (NLP) area that are capable of a broad spectrum of tasks (Vaswani et al., 2017; Lample et al., 2018; Devlin et al., 2018; , including machine translation, text style transfer, reading comprehension, etc. Transformers employ Seq2Seq structures that are usually built up by stacking encoder and decoder layers. Encoder layers consist of a multi-head self-attention layer followed by a position-wise feed-forward layer, while decoder layers consist of a multi-head self-attention layer, a multi-head cross-attention layer and a positionwise feed-forward layer. Residual connections and layer normalizations are used to improve the performance of deep Transformers.", |
| "cite_spans": [ |
| { |
| "start": 109, |
| "end": 131, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 188, |
| "end": 207, |
| "text": "(Zhao et al., 2019)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 382, |
| "end": 404, |
| "text": "(Vaswani et al., 2017;", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 405, |
| "end": 425, |
| "text": "Lample et al., 2018;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 426, |
| "end": 446, |
| "text": "Devlin et al., 2018;", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Grammatical Error Correction", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "The copy-augmented Transformer was originally proposed for text summarization tasks (Gu et 2016; See et al., 2017) and subsequently revamped to handle GEC tasks (Zhao et al., 2019; Choe et al., 2019) . Unlike the normal Transformers, copyaugmented Transformers are able to copy units (e.g. characters, sub-words, or words) from the source sentence, since the final probability distribution of a unit is the combination of a generative distribution and a copy distribution, balanced by a factor \u03b1 copy \u2208 [0, 1]. With a larger copy factor, the output units tend to copy from the source rather than generating their own, and vice versa.", |
| "cite_spans": [ |
| { |
| "start": 84, |
| "end": 90, |
| "text": "(Gu et", |
| "ref_id": null |
| }, |
| { |
| "start": 97, |
| "end": 114, |
| "text": "See et al., 2017)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 161, |
| "end": 180, |
| "text": "(Zhao et al., 2019;", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 181, |
| "end": 199, |
| "text": "Choe et al., 2019)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Grammatical Error Correction", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "al., Input \u56e0 \u4e3a \uff0c \u96fe \u70df \u523a \u6fc0 \u5c31 \u5bf9 \u4eba \u4f53 \u4f1a \u6709 \u5371 \u5bb3 \u3002 (Y\u012bn w\u00e8i , w\u00f9 y\u0101n c\u00ec j\u012b ji\u00f9 du\u00ec r\u00e9n t\u01d0 hu\u00ec y\u01d2u w\u0113i h\u00e0i .) Output O O O B-S I-S O O O B-W I-W I-W I-W O O O O Input \u6211 \u4e0d \u53ef \u4ee5 \u627e \u5230 \u4e86 \u5728 \u54ea \u91cc \u6211 \u4f1a \u4e70 \u83dc \u3002 ( W\u01d2 b\u00f9 k\u011b y\u01d0 zh\u01ceo d\u00e0o le z\u00e0i n\u01ce l\u01d0 w\u01d2 hu\u00ec m\u01cei c\u00e0i . ) Output O B-S I-S I-S O O B-M B-S O O B-R I-R O O O", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Grammatical Error Correction", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "GECToR Models. Similar to the Parallel Iterative Edit (PIE) model (Awasthi et al., 2019) , GEC-ToR (Omelianchuk et al., 2020) treats the GEC task as a sequential labeling problem. The core of the approach is the design of special output tags, which indicate the differences between source sentences and target sentences. In order to obtain the tags, minimal edits of the characters are firstly extracted based on the modified Levenshtein distance. Then the edits are converted to the following tags:", |
| "cite_spans": [ |
| { |
| "start": 66, |
| "end": 88, |
| "text": "(Awasthi et al., 2019)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 99, |
| "end": 125, |
| "text": "(Omelianchuk et al., 2020)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Grammatical Error Correction", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "\u2022 $KEEP, indicates that the character is unchanged.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Grammatical Error Correction", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "\u2022 $APPEND X, indicates that there is a character X missing after the current character.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Grammatical Error Correction", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "\u2022 $REPLACE X, indicates that the current character should be replaced by character X.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Grammatical Error Correction", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "\u2022 $REORDER, indicates that the current character is a part of the chunk where the reorder error occurs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Grammatical Error Correction", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "\u2022 $DELETE, indicates that the current character should be removed.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Grammatical Error Correction", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Identical to the basic GED models, GECToR model also stacks the fully connected layer and the softmax layer over the Transformer encoder.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Grammatical Error Correction", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Candidate Generation. During the experiment, we found that the set of correction candidates shares a large overlap across each year's training data. It is also consistent with intuition since there exist commonly confused words or characters in Chinese. To make use of this observation, we propose a candidate generation module based on a Chinese language model. Firstly, a Chinese characterlevel 5-gram language model (denoted by L in the following) is trained based on 30 million Chinese sentences. Then L is used to select the k most appropriate candidate words from a large set of candidates, which is extracted from the CGED training data, to replace the words in the original sentences according to the error type and position in the detection phase. Finally, the candidates along with those generated by Seq2Seq models and GECToR models are all sent to the post-processing module to obtain the final output.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Grammatical Error Correction", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Post-processing Outputs of GED Models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Post-processing", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Considering that one input token is allowed to be labeled as multiple error types depending on the actual situation, we propose to apply the following heuristics to the outputs of the GED framework in the post-processing stage.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Post-processing", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "1. If current tag O is followed by a tag I-X and the last tag is B-X, where X indicates a specific error type, then the current tag will be replaced by I-X.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Post-processing", |
| "sec_num": "4.4" |
| }, |
| { |
                "text": "2. If one tag set is nested into another one, they will be decomposed based on their starting and ending points. For example, when the following case happens, (1, 4, X_1) and (2, 3, X_2) are extracted as the final outputs instead of (1, 1, X_1) and (2, 3, X_2).",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Post-processing", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Tags:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Post-processing", |
| "sec_num": "4.4" |
| }, |
| { |
                "text": "B-X_1 B-X_2 I-X_2 I-X_1 Position: 1 2 3 4",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Post-processing", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Re-ranking the Correction Candidates. To rerank and select the elite candidates from those proposed by the three GEC models, this work proposes <DOC> <TEXT id=\"98200405109523100360_2_6x2\"> !!\"##$%&'()*+,'-./01&231&2456789:;<='>?@/ </TEXT> <CORRECTION> !!$##%&'AB*+,'-./01&231&2456789:;<='>C?@/ </CORRECTION> <ERROR start_off=\"3\" end_off=\"3\" type=\"R\"></ERROR> <ERROR start_off=\"4\" end_off=\"6\" type=\"W\"></ERROR> <ERROR start_off=\"10\" end_off=\"11\" type=\"S\"></ERROR> <ERROR start_off=\"39\" end_off=\"39\" type=\"M\"></ERROR> </DOC> a Chinese BERT-based 4 scoring model. The model takes a sentence and the corresponding correction candidate as input and returns the candidate's score, which lies between 0.0 and 1.0.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Post-processing", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Since there is no ready-made data for training this kind of scoring model, it leads us to the data generation process. There are basically two kinds of data needed by the model, including positive samples and negative samples.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Post-processing", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Positive samples can be directly generated from the CGED training data based on the process depicted in Fig. 2 . We firstly design a rule-based system to extract word-level edits from the training data. Obviously, extracted edits will include all kinds of errors (R, S, W and M). However, we only keep the edits related to S and M errors, since R and W errors are not taken into consideration in the correction task. Each edit can then be converted to a training sample, which can be denoted as a triple (s, w, t), where s indicates the input sentence, w is the correction candidate, and t is the fitness score of the candidate. Specifically, for the input sentence s, we insert \" \" to the left and right of the chunk where S error occurs, and we add the \" \" symbol to the position where the M error occurs, as shown in Fig. 2 . Considering that all training data provided by CGED shared tasks are manually annotated data, we assign higher scores (1.0 in this work) to these candidates.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 104, |
| "end": 110, |
| "text": "Fig. 2", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 820, |
| "end": 826, |
| "text": "Fig. 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Post-processing", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "The model cannot be trained only using positive samples. Hence we propose a negative data generation algorithm, as shown in Algorithm 1. Here we define D p as the collection of all positive training data, W p as the collection of all the candidate words in D p , S pe as the collection of all the sentences in D p . For each input sentence s in S pe , we score every word in the candidate set to find out unsuitable candidates for s. More specifically, a new sentence s sub , which is reconstructed by substituting the corresponding word in s with the candidate word or inserting the candidate word to s, is scored by L. Then, we select k candidates which have the lowest scores. Finally, we randomly choose one candidate (i.e., W cand ) from the k candidates, and form a negative sample (s, W cand , 0.0) for the proposed scoring model. Algorithm 1 Negative Training Data Generation 1: Input: Spe, Wp, L, k 2: Output: negative training data Dn 3: Dn \u2190 {} 4: P P (t) \u2190 score of sentence t calculated by L 5: for i in range(len (Spe)) do 6:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Post-processing", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "s \u2190 Spe[i] 7:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Post-processing", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "for w \u2208 Wp do 8:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Post-processing", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "s sub \u2190 replace the word in s with w 9:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Post-processing", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "ps sub \u2190 P P (s sub ) 10: end for 11:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Post-processing", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Stop k \u2190 top k from Wp based on ps sub 12:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Post-processing", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "w cand \u2190 random.sample(Stop k , 1) 13:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Post-processing", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Dn \u2190 Dn + (s, w cand , 0) 14: end for Inspired by the idea of \"next sentence prediction\" task (Devlin et al., 2018) , we concatenate the input sentence s and the correction candidate w as a pair S pair , and then feed it into our scoring model. Seq2Seq models, GECToR models and the candidate generation module tend to produce different candidates. Hence in the re-ranking stage, the correction candidate and its corresponding input sentence are fed into the scoring model one by one. We then select the top three candidates with the highest score for each input sentence.", |
| "cite_spans": [ |
| { |
| "start": 94, |
| "end": 115, |
| "text": "(Devlin et al., 2018)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Post-processing", |
| "sec_num": "4.4" |
| }, |
| { |
                "text": "Fusion of Results. It should be noted that the training data and vocabulary of the GEC models are different. Therefore, directly applying the ensemble techniques is infeasible. Instead, we propose to obtain the final edits by the following three steps. First, the corrected sentences produced by multiple GEC models are aligned with the original ones and the edits are extracted automatically by our rule-based extraction system. We also generate several edits with the candidate generation module based on the results of the detection phase. Second, we fuse these edits based on their error positions and types. In other words, a series of candidate words are generated for each error position. Third, we discard the edits that are not consistent with the detection results. This step is vital since the training processes of Seq2Seq models and GECToR models are completely independent and may produce conflicting edits.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Post-processing", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "To improve the accuracy of correction candi- dates, we set a threshold to filter the candidates with less confidence. Finally, we obtain the final fusion result after all the processes described above. Table 3 shows an example of the fusion process.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 202, |
| "end": 209, |
| "text": "Table 3", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Post-processing", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "The proposed system utilizes training data provided by the CGED-2016, CGED-2017, CGED-2018 and CGED-2020 shared tasks. Table 5 : Overall performance of the developed system on CGED 2020 shared task. vided by CGED shared task, we also utilize the data provided by the NLPCC-2018 shared task (Zhao et al., 2018) 5 to train our GEC models. Moreover, NetEase News Corpus is used to generate synthetic data.", |
| "cite_spans": [ |
| { |
| "start": 290, |
| "end": 311, |
| "text": "(Zhao et al., 2018) 5", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 119, |
| "end": 126, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "As previously stated in Section 2, submitted results are evaluated at four different levels, i.e. detection-level, identification-level, position-level and correction-level. At each level, precision (Pre.), recall (Rec.) and F1 score are calculated based on the gold standard and the system outputs. Specially at the detection-level, false positive rate (FPR) as well as accuracy is calculated in addition to the above evaluation metrics.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation Metrics", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "In this work, we utilize the Chinese pre-trained language models with large configuration (24 layers) including Robustly optimized BERT pre-training approach (RoBERTa, Liu et al., 2019) 6 and pretraining with whole word masking for Chinese BERT (Cui et al., 2019) 7 as the starting point of the fine-tuning process. We also tested Chinese BERT 8 , but it resulted in poorer performance on the GED task than the above mentioned two models. We trained 30 basic GED models based on various pretrained models along with different initialization seeds. Then we averaged the last several checkpoints of models and apply distribution ensemble 5 http://tcci.ccf.org.cn/conference/ 2018/taskdata.php 6 https://github.com/brightmart/ roberta_zh 7 https://github.com/ymcui/ Chinese-BERT-wwm 8 https://github.com/google-research/ bert/blob/master/multilingual.md on every 4 or 5 models. GEC models also follow similar steps to obtain final models.", |
| "cite_spans": [ |
| { |
| "start": 245, |
| "end": 265, |
| "text": "(Cui et al., 2019) 7", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training Details", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "The overall performance of our developed system is given in Table 5 . It can be seen that the system achieves F1 scores up to 0.8926, 0.6508 and 0.3762 at the detection-level, identification-level and position-level, respectively. As for the correction task, we achieve F1 scores of 0.1845 and 0.1879 at TOP1 and TOP3 correction track.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 60, |
| "end": 67, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5.4" |
| }, |
| { |
                "text": "Among the 43 submissions for the detection task, our system ranks 4 to 6 at detection, identification and position tracks, but ranks 12 at FPR track. It is remarkable that this system achieves the highest precisions among the top 10 submissions at identification and position tracks. This system performs even better at the correction tracks. It achieves the highest F1 score also with the highest precision at TOP3 correction track. Besides, the system gets the highest precision with third-highest F1 score at TOP1 correction track, however, the gap is only 0.0046.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5.4" |
| }, |
| { |
                "text": "This paper describes our system on NLPTEA-2020 CGED shared task. To make the system more robust against data sparseness and lack of data, we adopt the synthetic data generation process during model training. Besides utilizing up-to-date model architectures, we also carefully optimized the system performance by employing ensemble techniques, voting mechanisms and rule-based post-processing. We plan to integrate more grammatical features into the GED and GEC models and optimize the post-processing algorithm to further improve the system performance.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "https://github.com/chatopera/Synonyms 2 extracted from common mistakes made by students.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "http://nlp.ee.ncu.edu.tw/resource/csc. html", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://github.com/google-research/ bert/blob/master/multilingual.md", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Parallel iterative edit models for local sequence transduction", |
| "authors": [ |
| { |
| "first": "Abhijeet", |
| "middle": [], |
| "last": "Awasthi", |
| "suffix": "" |
| }, |
| { |
| "first": "Sunita", |
| "middle": [], |
| "last": "Sarawagi", |
| "suffix": "" |
| }, |
| { |
| "first": "Rasna", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Sabyasachi", |
| "middle": [], |
| "last": "Ghosh", |
| "suffix": "" |
| }, |
| { |
| "first": "Vihari", |
| "middle": [], |
| "last": "Piratla", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "4260--4270", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D19-1435" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Abhijeet Awasthi, Sunita Sarawagi, Rasna Goyal, Sabyasachi Ghosh, and Vihari Piratla. 2019. Parallel iterative edit models for local sequence transduction. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Lan- guage Processing (EMNLP-IJCNLP), pages 4260- 4270, Hong Kong, China. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Ilya Sutskever, and Dario Amodei. 2020. Language models are fewshot learners. Computing Research Repository", |
| "authors": [ |
| { |
| "first": "Tom", |
| "middle": [ |
| "B" |
| ], |
| "last": "Brown", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Mann", |
| "suffix": "" |
| }, |
| { |
| "first": "Nick", |
| "middle": [], |
| "last": "Ryder", |
| "suffix": "" |
| }, |
| { |
| "first": "Melanie", |
| "middle": [], |
| "last": "Subbiah", |
| "suffix": "" |
| }, |
| { |
| "first": "Jared", |
| "middle": [], |
| "last": "Kaplan", |
| "suffix": "" |
| }, |
| { |
| "first": "Prafulla", |
| "middle": [], |
| "last": "Dhariwal", |
| "suffix": "" |
| }, |
| { |
| "first": "Arvind", |
| "middle": [], |
| "last": "Neelakantan", |
| "suffix": "" |
| }, |
| { |
| "first": "Pranav", |
| "middle": [], |
| "last": "Shyam", |
| "suffix": "" |
| }, |
| { |
| "first": "Girish", |
| "middle": [], |
| "last": "Sastry", |
| "suffix": "" |
| }, |
| { |
| "first": "Amanda", |
| "middle": [], |
| "last": "Askell", |
| "suffix": "" |
| }, |
| { |
| "first": "Sandhini", |
| "middle": [], |
| "last": "Agarwal", |
| "suffix": "" |
| }, |
| { |
| "first": "Ariel", |
| "middle": [], |
| "last": "Herbert-Voss", |
| "suffix": "" |
| }, |
| { |
| "first": "Gretchen", |
| "middle": [], |
| "last": "Krueger", |
| "suffix": "" |
| }, |
| { |
| "first": "Tom", |
| "middle": [], |
| "last": "Henighan", |
| "suffix": "" |
| }, |
| { |
| "first": "Rewon", |
| "middle": [], |
| "last": "Child", |
| "suffix": "" |
| }, |
| { |
| "first": "Aditya", |
| "middle": [], |
| "last": "Ramesh", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [ |
| "M" |
| ], |
| "last": "Ziegler", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Clemens", |
| "middle": [], |
| "last": "Winter", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Hesse", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Sigler", |
| "suffix": "" |
| }, |
| { |
| "first": "Mateusz", |
| "middle": [], |
| "last": "Litwin", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2005.14165" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel M. Ziegler, Jeffrey Wu, Clemens Winter, Christopher Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Ben- jamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. 2020. Language models are few- shot learners. Computing Research Repository, arXiv:2005.14165. Version 4.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "The BEA-2019 shared task on grammatical error correction", |
| "authors": [ |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Bryant", |
| "suffix": "" |
| }, |
| { |
| "first": "Mariano", |
| "middle": [], |
| "last": "Felice", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "\u00d8istein", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Fourteenth Workshop on Innovative Use of NLP for Building Educational Applications", |
| "volume": "", |
| "issue": "", |
| "pages": "52--75", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W19-4406" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christopher Bryant, Mariano Felice, \u00d8istein E. An- dersen, and Ted Briscoe. 2019. The BEA-2019 shared task on grammatical error correction. In Pro- ceedings of the Fourteenth Workshop on Innovative Use of NLP for Building Educational Applications, pages 52-75, Florence, Italy. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "A neural grammatical error correction system built on better pre-training and sequential transfer learning", |
| "authors": [ |
| { |
| "first": "Yo Joong", |
| "middle": [], |
| "last": "Choe", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiyeon", |
| "middle": [], |
| "last": "Ham", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyubyong", |
| "middle": [], |
| "last": "Park", |
| "suffix": "" |
| }, |
| { |
| "first": "Yeoil", |
| "middle": [], |
| "last": "Yoon", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Fourteenth Workshop on Innovative Use of NLP for Building Educational Applications", |
| "volume": "", |
| "issue": "", |
| "pages": "213--227", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W19-4423" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yo Joong Choe, Jiyeon Ham, Kyubyong Park, and Yeoil Yoon. 2019. A neural grammatical error cor- rection system built on better pre-training and se- quential transfer learning. In Proceedings of the Fourteenth Workshop on Innovative Use of NLP for Building Educational Applications, pages 213-227, Florence, Italy. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "A multilayer convolutional encoder-decoder neural network for grammatical error correction", |
| "authors": [ |
| { |
| "first": "Shamil", |
| "middle": [], |
| "last": "Chollampatt", |
| "suffix": "" |
| }, |
| { |
| "first": "Hwee Tou", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Thirty-Second AAAI Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shamil Chollampatt and Hwee Tou Ng. 2018. A multi- layer convolutional encoder-decoder neural network for grammatical error correction. In Proceedings of the Thirty-Second AAAI Conference on Artificial In- telligence, New Orleans, Louisiana, USA.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Pre-training with whole word masking for Chinese BERT. Computing Research Repository", |
| "authors": [ |
| { |
| "first": "Yiming", |
| "middle": [], |
| "last": "Cui", |
| "suffix": "" |
| }, |
| { |
| "first": "Wanxiang", |
| "middle": [], |
| "last": "Che", |
| "suffix": "" |
| }, |
| { |
| "first": "Ting", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "Qin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ziqing", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Shijin", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Guoping", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1906.08101" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yiming Cui, Wanxiang Che, Ting Liu, Bing Qin, Ziqing Yang, Shijin Wang, and Guoping Hu. 2019. Pre-training with whole word masking for Chi- nese BERT. Computing Research Repository, arXiv:1906.08101.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Style transformer: Unpaired text style transfer without disentangled latent representation", |
| "authors": [ |
| { |
| "first": "Ning", |
| "middle": [], |
| "last": "Dai", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianze", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xipeng", |
| "middle": [], |
| "last": "Qiu", |
| "suffix": "" |
| }, |
| { |
| "first": "Xuanjing", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "5997--6007", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P19-1601" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ning Dai, Jianze Liang, Xipeng Qiu, and Xuanjing Huang. 2019. Style transformer: Unpaired text style transfer without disentangled latent represen- tation. In Proceedings of the 57th Annual Meet- ing of the Association for Computational Linguis- tics, pages 5997-6007, Florence, Italy. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Computing Research Repository", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1810.04805" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. BERT: Pre-training of deep bidirectional transformers for language understanding. Computing Research Repository, arXiv:1810.04805. Version 2.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Youdao's winning solution to the NLPCC-2018 task 2 challenge: A neural machine translation approach to Chinese grammatical error correction", |
| "authors": [ |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Fu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jin", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yitao", |
| "middle": [], |
| "last": "Duan", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Natural Language Processing and Chinese Computing", |
| "volume": "", |
| "issue": "", |
| "pages": "341--350", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kai Fu, Jin Huang, and Yitao Duan. 2018a. Youdao's winning solution to the NLPCC-2018 task 2 chal- lenge: A neural machine translation approach to Chi- nese grammatical error correction. In Natural Lan- guage Processing and Chinese Computing, pages 341-350, Cham. Springer International Publishing.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Chinese grammatical error diagnosis using statistical and prior knowledge driven features with probabilistic ensemble enhancement", |
| "authors": [ |
| { |
| "first": "Ruiji", |
| "middle": [], |
| "last": "Fu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhengqi", |
| "middle": [], |
| "last": "Pei", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiefu", |
| "middle": [], |
| "last": "Gong", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| }, |
| { |
| "first": "Dechuan", |
| "middle": [], |
| "last": "Teng", |
| "suffix": "" |
| }, |
| { |
| "first": "Wanxiang", |
| "middle": [], |
| "last": "Che", |
| "suffix": "" |
| }, |
| { |
| "first": "Shijin", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Guoping", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "Ting", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 5th Workshop on Natural Language Processing Techniques for Educational Applications", |
| "volume": "", |
| "issue": "", |
| "pages": "52--59", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W18-3707" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ruiji Fu, Zhengqi Pei, Jiefu Gong, Wei Song, Dechuan Teng, Wanxiang Che, Shijin Wang, Guoping Hu, and Ting Liu. 2018b. Chinese grammatical error di- agnosis using statistical and prior knowledge driven features with probabilistic ensemble enhancement. In Proceedings of the 5th Workshop on Natural Lan- guage Processing Techniques for Educational Appli- cations, pages 52-59, Melbourne, Australia. Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Fluency boost learning and inference for neural grammatical error correction", |
| "authors": [ |
| { |
| "first": "Tao", |
| "middle": [], |
| "last": "Ge", |
| "suffix": "" |
| }, |
| { |
| "first": "Furu", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1055--1065", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P18-1097" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tao Ge, Furu Wei, and Ming Zhou. 2018. Fluency boost learning and inference for neural grammati- cal error correction. In Proceedings of the 56th An- nual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1055- 1065, Melbourne, Australia. Association for Compu- tational Linguistics.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Near human-level performance in grammatical error correction with hybrid machine translation", |
| "authors": [ |
| { |
| "first": "Roman", |
| "middle": [], |
| "last": "Grundkiewicz", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcin", |
| "middle": [], |
| "last": "Junczys-Dowmunt", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "2", |
| "issue": "", |
| "pages": "284--290", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N18-2046" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Roman Grundkiewicz and Marcin Junczys-Dowmunt. 2018. Near human-level performance in grammat- ical error correction with hybrid machine transla- tion. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Com- putational Linguistics: Human Language Technolo- gies, Volume 2 (Short Papers), pages 284-290, New Orleans, Louisiana. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Neural grammatical error correction systems with unsupervised pre-training on synthetic data", |
| "authors": [ |
| { |
| "first": "Roman", |
| "middle": [], |
| "last": "Grundkiewicz", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcin", |
| "middle": [], |
| "last": "Junczys-Dowmunt", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenneth", |
| "middle": [], |
| "last": "Heafield", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Fourteenth Workshop on Innovative Use of NLP for Building Educational Applications", |
| "volume": "", |
| "issue": "", |
| "pages": "252--263", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W19-4427" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Roman Grundkiewicz, Marcin Junczys-Dowmunt, and Kenneth Heafield. 2019. Neural grammatical error correction systems with unsupervised pre-training on synthetic data. In Proceedings of the Fourteenth Workshop on Innovative Use of NLP for Building Educational Applications, pages 252-263, Florence, Italy. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Incorporating copying mechanism in sequence-to-sequence learning", |
| "authors": [ |
| { |
| "first": "Jiatao", |
| "middle": [], |
| "last": "Gu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhengdong", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "Hang", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "O", |
| "middle": [ |
| "K" |
| ], |
| "last": "Victor", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1631--1640", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P16-1154" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiatao Gu, Zhengdong Lu, Hang Li, and Victor O.K. Li. 2016. Incorporating copying mechanism in sequence-to-sequence learning. In Proceedings of the 54th Annual Meeting of the Association for Com- putational Linguistics (Volume 1: Long Papers), pages 1631-1640, Berlin, Germany. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Approaching neural grammatical error correction as a low-resource machine translation task", |
| "authors": [ |
| { |
| "first": "Marcin", |
| "middle": [], |
| "last": "Junczys-Dowmunt", |
| "suffix": "" |
| }, |
| { |
| "first": "Roman", |
| "middle": [], |
| "last": "Grundkiewicz", |
| "suffix": "" |
| }, |
| { |
| "first": "Shubha", |
| "middle": [], |
| "last": "Guha", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenneth", |
| "middle": [], |
| "last": "Heafield", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "595--606", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N18-1055" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marcin Junczys-Dowmunt, Roman Grundkiewicz, Shubha Guha, and Kenneth Heafield. 2018. Ap- proaching neural grammatical error correction as a low-resource machine translation task. In Proceed- ings of the 2018 Conference of the North Ameri- can Chapter of the Association for Computational Linguistics: Human Language Technologies, Vol- ume 1 (Long Papers), pages 595-606, New Orleans, Louisiana. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Phrase-based & neural unsupervised machine translation", |
| "authors": [ |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Lample", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Ludovic", |
| "middle": [], |
| "last": "Denoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Marc'aurelio", |
| "middle": [], |
| "last": "Ranzato", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "5039--5049", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D18-1549" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Guillaume Lample, Myle Ott, Alexis Conneau, Lu- dovic Denoyer, and Marc'Aurelio Ranzato. 2018. Phrase-based & neural unsupervised machine trans- lation. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 5039-5049, Brussels, Belgium. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Overview of NLP-TEA 2016 shared task for Chinese grammatical error diagnosis", |
| "authors": [ |
| { |
| "first": "Gaoqi", |
| "middle": [], |
| "last": "Lung-Hao Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Liang-Chih", |
| "middle": [], |
| "last": "Rao", |
| "suffix": "" |
| }, |
| { |
| "first": "Endong", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Baolin", |
| "middle": [], |
| "last": "Xun", |
| "suffix": "" |
| }, |
| { |
| "first": "Li-Ping", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 3rd Workshop on Natural Language Processing Techniques for Educational Applications (NLPTEA2016)", |
| "volume": "", |
| "issue": "", |
| "pages": "40--48", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lung-Hao Lee, Gaoqi Rao, Liang-Chih Yu, Endong Xun, Baolin Zhang, and Li-Ping Chang. 2016. Overview of NLP-TEA 2016 shared task for Chi- nese grammatical error diagnosis. In Proceedings of the 3rd Workshop on Natural Language Pro- cessing Techniques for Educational Applications (NLPTEA2016), pages 40-48, Osaka, Japan. The COLING 2016 Organizing Committee.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Overview of the NLP-TEA 2015 shared task for Chinese grammatical error diagnosis", |
| "authors": [ |
| { |
| "first": "Lung-Hao", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Liang-Chih", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Li-Ping", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2nd Workshop on Natural Language Processing Techniques for Educational Applications", |
| "volume": "", |
| "issue": "", |
| "pages": "1--6", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W15-4401" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lung-Hao Lee, Liang-Chih Yu, and Li-Ping Chang. 2015. Overview of the NLP-TEA 2015 shared task for Chinese grammatical error diagnosis. In Pro- ceedings of the 2nd Workshop on Natural Language Processing Techniques for Educational Applications, pages 1-6, Beijing, China. Association for Compu- tational Linguistics.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "A hybrid system for Chinese grammatical error diagnosis and correction", |
| "authors": [ |
| { |
| "first": "Chen", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Junpei", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Zuyi", |
| "middle": [], |
| "last": "Bao", |
| "suffix": "" |
| }, |
| { |
| "first": "Hengyou", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Guangwei", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Linlin", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 5th Workshop on Natural Language Processing Techniques for Educational Applications", |
| "volume": "", |
| "issue": "", |
| "pages": "60--69", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W18-3708" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chen Li, Junpei Zhou, Zuyi Bao, Hengyou Liu, Guang- wei Xu, and Linlin Li. 2018. A hybrid system for Chinese grammatical error diagnosis and correction. In Proceedings of the 5th Workshop on Natural Lan- guage Processing Techniques for Educational Appli- cations, pages 60-69, Melbourne, Australia. Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "YNU-HPCC at IJCNLP-2017 task 1: Chinese grammatical error diagnosis using a bi-directional LSTM-CRF model", |
| "authors": [ |
| { |
| "first": "Quanlei", |
| "middle": [], |
| "last": "Liao", |
| "suffix": "" |
| }, |
| { |
| "first": "Jin", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jinnan", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xuejie", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Asian Federation of Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "73--77", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Quanlei Liao, Jin Wang, Jinnan Yang, and Xuejie Zhang. 2017. YNU-HPCC at IJCNLP-2017 task 1: Chinese grammatical error diagnosis using a bi-directional LSTM-CRF model. In Proceedings of the IJCNLP 2017, Shared Tasks, pages 73-77, Taipei, Taiwan. Asian Federation of Natural Lan- guage Processing.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Corpora generation for grammatical error correction", |
| "authors": [ |
| { |
| "first": "Jared", |
| "middle": [], |
| "last": "Lichtarge", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Alberti", |
| "suffix": "" |
| }, |
| { |
| "first": "Shankar", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Simon", |
| "middle": [], |
| "last": "Tong", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "3291--3301", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1333" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jared Lichtarge, Chris Alberti, Shankar Kumar, Noam Shazeer, Niki Parmar, and Simon Tong. 2019. Cor- pora generation for grammatical error correction. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Compu- tational Linguistics: Human Language Technolo- gies, Volume 1 (Long and Short Papers), pages 3291-3301, Minneapolis, Minnesota. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "RoBERTa: A robustly optimized BERT pretraining approach", |
| "authors": [ |
| { |
| "first": "Yinhan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingfei", |
| "middle": [], |
| "last": "Du", |
| "suffix": "" |
| }, |
| { |
| "first": "Mandar", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Computing Research Repository", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1907.11692" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. RoBERTa: A robustly optimized BERT pre- training approach. Computing Research Repository, arXiv:1907.11692.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "The CoNLL-2014 shared task on grammatical error correction", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Hwee Tou Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "Mei", |
| "middle": [], |
| "last": "Siew", |
| "suffix": "" |
| }, |
| { |
| "first": "Ted", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Briscoe", |
| "suffix": "" |
| }, |
| { |
| "first": "Raymond", |
| "middle": [ |
| "Hendy" |
| ], |
| "last": "Hadiwinoto", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Susanto", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Bryant", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the Eighteenth Conference on Computational Natural Language Learning: Shared Task", |
| "volume": "", |
| "issue": "", |
| "pages": "1--14", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/v1/W14-1701" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hwee Tou Ng, Siew Mei Wu, Ted Briscoe, Christian Hadiwinoto, Raymond Hendy Susanto, and Christo- pher Bryant. 2014. The CoNLL-2014 shared task on grammatical error correction. In Proceedings of the Eighteenth Conference on Computational Natural Language Learning: Shared Task, pages 1-14, Balti- more, Maryland. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "The CoNLL-2013 shared task on grammatical error correction", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Hwee Tou Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "Mei", |
| "middle": [], |
| "last": "Siew", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuanbin", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Joel", |
| "middle": [], |
| "last": "Hadiwinoto", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Tetreault", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the Seventeenth Conference on Computational Natural Language Learning: Shared Task", |
| "volume": "", |
| "issue": "", |
| "pages": "1--12", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hwee Tou Ng, Siew Mei Wu, Yuanbin Wu, Christian Hadiwinoto, and Joel Tetreault. 2013. The CoNLL- 2013 shared task on grammatical error correction. In Proceedings of the Seventeenth Conference on Computational Natural Language Learning: Shared Task, pages 1-12, Sofia, Bulgaria. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "GECToR -grammatical error correction: Tag, not rewrite", |
| "authors": [ |
| { |
| "first": "Kostiantyn", |
| "middle": [], |
| "last": "Omelianchuk", |
| "suffix": "" |
| }, |
| { |
| "first": "Vitaliy", |
| "middle": [], |
| "last": "Atrasevych", |
| "suffix": "" |
| }, |
| { |
| "first": "Artem", |
| "middle": [], |
| "last": "Chernodub", |
| "suffix": "" |
| }, |
| { |
| "first": "Oleksandr", |
| "middle": [], |
| "last": "Skurzhanskyi", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Fifteenth Workshop on Innovative Use of NLP for Building Educational Applications", |
| "volume": "", |
| "issue": "", |
| "pages": "163--170", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kostiantyn Omelianchuk, Vitaliy Atrasevych, Artem Chernodub, and Oleksandr Skurzhanskyi. 2020. GECToR -grammatical error correction: Tag, not rewrite. In Proceedings of the Fifteenth Workshop on Innovative Use of NLP for Building Educational Applications, pages 163-170, Seattle, WA, USA \u2192 Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Overview of NLPTEA-2018 share task Chinese grammatical error diagnosis", |
| "authors": [ |
| { |
| "first": "Gaoqi", |
| "middle": [], |
| "last": "Rao", |
| "suffix": "" |
| }, |
| { |
| "first": "Qi", |
| "middle": [], |
| "last": "Gong", |
| "suffix": "" |
| }, |
| { |
| "first": "Baolin", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Endong", |
| "middle": [], |
| "last": "Xun", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 5th Workshop on Natural Language Processing Techniques for Educational Applications", |
| "volume": "", |
| "issue": "", |
| "pages": "42--51", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W18-3706" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gaoqi Rao, Qi Gong, Baolin Zhang, and Endong Xun. 2018. Overview of NLPTEA-2018 share task Chi- nese grammatical error diagnosis. In Proceedings of the 5th Workshop on Natural Language Process- ing Techniques for Educational Applications, pages 42-51, Melbourne, Australia. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "IJCNLP-2017 task 1: Chinese grammatical error diagnosis", |
| "authors": [ |
| { |
| "first": "Gaoqi", |
| "middle": [], |
| "last": "Rao", |
| "suffix": "" |
| }, |
| { |
| "first": "Baolin", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Endong", |
| "middle": [], |
| "last": "Xun", |
| "suffix": "" |
| }, |
| { |
| "first": "Lung-Hao", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Asian Federation of Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1--8", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gaoqi Rao, Baolin Zhang, Endong Xun, and Lung-Hao Lee. 2017. IJCNLP-2017 task 1: Chinese gram- matical error diagnosis. In Proceedings of the IJC- NLP 2017, Shared Tasks, pages 1-8, Taipei, Taiwan. Asian Federation of Natural Language Processing.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Get to the point: Summarization with pointergenerator networks", |
| "authors": [ |
| { |
| "first": "Abigail", |
| "middle": [], |
| "last": "See", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Peter", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1073--1083", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P17-1099" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Abigail See, Peter J. Liu, and Christopher D. Manning. 2017. Get to the point: Summarization with pointer- generator networks. In Proceedings of the 55th An- nual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1073- 1083, Vancouver, Canada. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "30", |
| "issue": "", |
| "pages": "5998--6008", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Gar- nett, editors, Advances in Neural Information Pro- cessing Systems 30, pages 5998-6008. Curran Asso- ciates, Inc.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Erroneous data generation for grammatical error correction", |
| "authors": [ |
| { |
| "first": "Shuyao", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiehao", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jin", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Long", |
| "middle": [], |
| "last": "Qin", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Fourteenth Workshop on Innovative Use of NLP for Building Educational Applications", |
| "volume": "", |
| "issue": "", |
| "pages": "149--158", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W19-4415" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shuyao Xu, Jiehao Zhang, Jin Chen, and Long Qin. 2019. Erroneous data generation for grammatical error correction. In Proceedings of the Fourteenth Workshop on Innovative Use of NLP for Building Educational Applications, pages 149-158, Florence, Italy. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Alibaba at IJCNLP-2017 task 1: Embedding grammatical features into LSTMs for Chinese grammatical error diagnosis task", |
| "authors": [ |
| { |
| "first": "Yi", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Pengjun", |
| "middle": [], |
| "last": "Xie", |
| "suffix": "" |
| }, |
| { |
| "first": "Jun", |
| "middle": [], |
| "last": "Tao", |
| "suffix": "" |
| }, |
| { |
| "first": "Guangwei", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Linlin", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Luo", |
| "middle": [], |
| "last": "Si", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Asian Federation of Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "41--46", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yi Yang, Pengjun Xie, Jun Tao, Guangwei Xu, Linlin Li, and Luo Si. 2017. Alibaba at IJCNLP-2017 task 1: Embedding grammatical features into LSTMs for Chinese grammatical error diagnosis task. In Pro- ceedings of the IJCNLP 2017, Shared Tasks, pages 41-46, Taipei, Taiwan. Asian Federation of Natural Language Processing.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "XLNet: Generalized autoregressive pretraining for language understanding", |
| "authors": [ |
| { |
| "first": "Zhilin", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zihang", |
| "middle": [], |
| "last": "Dai", |
| "suffix": "" |
| }, |
| { |
| "first": "Yiming", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaime", |
| "middle": [], |
| "last": "Carbonell", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Russ", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc V", |
| "middle": [], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "32", |
| "issue": "", |
| "pages": "5753--5763", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Car- bonell, Russ R Salakhutdinov, and Quoc V Le. 2019. XLNet: Generalized autoregressive pretraining for language understanding. In Advances in Neural In- formation Processing Systems 32, pages 5753-5763. Curran Associates, Inc.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Overview of grammatical error diagnosis for learning Chinese as a foreign language", |
| "authors": [ |
| { |
| "first": "Liang-Chih", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Lung-Hao", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Liping", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 1st Workshop on Natural Language Processing Techniques for Educational Applications", |
| "volume": "", |
| "issue": "", |
| "pages": "42--47", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Liang-Chih Yu, Lung-Hao Lee, and Liping Chang. 2014. Overview of grammatical error diagnosis for learning Chinese as a foreign language. In Pro- ceedings of the 1st Workshop on Natural Language Processing Techniques for Educational Applications, pages 42-47, Nara, Japan.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "CMMC-BDRC solution to the NLP-TEA-2018 Chinese grammatical error diagnosis task", |
| "authors": [ |
| { |
| "first": "Yongwei", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Qinan", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "Fang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yueguo", |
| "middle": [], |
| "last": "Gu", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 5th Workshop on Natural Language Processing Techniques for Educational Applications", |
| "volume": "", |
| "issue": "", |
| "pages": "180--187", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W18-3726" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yongwei Zhang, Qinan Hu, Fang Liu, and Yueguo Gu. 2018. CMMC-BDRC solution to the NLP-TEA- 2018 Chinese grammatical error diagnosis task. In Proceedings of the 5th Workshop on Natural Lan- guage Processing Techniques for Educational Appli- cations, pages 180-187, Melbourne, Australia. As- sociation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Improving grammatical error correction via pre-training a copy-augmented architecture with unlabeled data", |
| "authors": [ |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Liang", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kewei", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruoyu", |
| "middle": [], |
| "last": "Jia", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingming", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "156--165", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1014" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wei Zhao, Liang Wang, Kewei Shen, Ruoyu Jia, and Jingming Liu. 2019. Improving grammatical error correction via pre-training a copy-augmented archi- tecture with unlabeled data. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Hu- man Language Technologies, Volume 1 (Long and Short Papers), pages 156-165, Minneapolis, Min- nesota. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Overview of the nlpcc 2018 shared task: Grammatical error correction", |
| "authors": [ |
| { |
| "first": "Yuanyuan", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Nan", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Weiwei", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaojun", |
| "middle": [], |
| "last": "Wan", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Natural Language Processing and Chinese Computing", |
| "volume": "", |
| "issue": "", |
| "pages": "439--445", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yuanyuan Zhao, Nan Jiang, Weiwei Sun, and Xiaojun Wan. 2018. Overview of the nlpcc 2018 shared task: Grammatical error correction. In Natural Language Processing and Chinese Computing, pages 439-445, Cham. Springer International Publishing.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Chinese grammatical error diagnosis with long short-term memory networks", |
| "authors": [ |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Zheng", |
| "suffix": "" |
| }, |
| { |
| "first": "Wanxiang", |
| "middle": [], |
| "last": "Che", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiang", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| }, |
| { |
| "first": "Ting", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 3rd Workshop on Natural Language Processing Techniques for Educational Applications (NLPTEA2016)", |
| "volume": "", |
| "issue": "", |
| "pages": "49--56", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bo Zheng, Wanxiang Che, Jiang Guo, and Ting Liu. 2016. Chinese grammatical error diagnosis with long short-term memory networks. In Proceed- ings of the 3rd Workshop on Natural Language Processing Techniques for Educational Applications (NLPTEA2016), pages 49-56, Osaka, Japan. The COLING 2016 Organizing Committee.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "The overall architecture of the developed system.", |
| "type_str": "figure", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF1": { |
| "text": "Example of positive data generation process.", |
| "type_str": "figure", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF2": { |
| "text": "Fig. 3 demonstrates the architecture of the proposed scoring model as well as an example of S pair .", |
| "type_str": "figure", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF3": { |
| "text": "The architecture of the proposed scoring model.", |
| "type_str": "figure", |
| "num": null, |
| "uris": null |
| }, |
| "TABREF1": { |
| "type_str": "table", |
| "content": "<table><tr><td>Error type</td><td>Erroneous sentence</td><td>Correct sentence</td></tr><tr><td colspan=\"2\">\u6211\u548c\u5988\u5988\u662f\u4e0d\u50cf\u522b\u7684\u6bcd\u5973\u3002 \u6211\u540c\u610f\u540e\u8005\u4e3b\u5f20\u3002 (M R (W\u01d2 t\u00f3ng y\u00ec h\u00f2u zh\u011b zh\u01d4 zh\u0101ng.)</td><td>\u6211\u548c\u5988\u5988\u4e0d\u50cf\u522b\u7684\u6bcd\u5973\u3002 \u6211\u540c\u610f\u540e\u8005\u7684\u4e3b\u5f20\u3002 (W\u01d2 t\u00f3ng y\u00ec h\u00f2u zh\u011b de zh\u01d4 zh\u0101ng.)</td></tr><tr><td>S</td><td>\u4e0a\u5468\u6211\u7684\u8f66\u522e\u75bc\u554a\u3002 (Sh\u00e0ng zh\u014du w\u01d2 de ch\u0113 gu\u0101 t\u00e9ng a.)</td><td>\u4e0a\u5468\u6211\u7684\u8f66\u88ab\u522e\u4e86\u3002 (Sh\u00e0ng zh\u014du w\u01d2 de ch\u0113 b\u00e8i gu\u0101 le.)</td></tr><tr><td>W</td><td>\u6211\u662f\u8fd8\u5728\u5b66\u6821\u4e0a\u73ed\u3002 (W\u01d2 sh\u00ec h\u00e1i z\u00e0i xu\u00e9 xi\u00e0o sh\u00e0ng b\u0101n.)</td><td>\u6211\u8fd8\u662f\u5728\u5b66\u6821\u4e0a\u73ed\u3002 (W\u01d2 h\u00e1i sh\u00ec z\u00e0i xu\u00e9 xi\u00e0o sh\u00e0ng b\u0101n.)</td></tr><tr><td/><td/><td>al.,</td></tr></table>", |
| "text": "W\u01d2 h\u00e9 m\u0101 ma sh\u00ec b\u00f9 xi\u00e0ng bi\u00e9 de m\u01d4 n\u01d4.) (W\u01d2 h\u00e9 m\u0101 ma b\u00f9 xi\u00e0ng bi\u00e9 de m\u01d4 n\u01d4.)", |
| "html": null, |
| "num": null |
| }, |
| "TABREF2": { |
| "type_str": "table", |
| "content": "<table><tr><td>Basic GED</td><td colspan=\"2\">Ensemble</td><td/></tr><tr><td>Models</td><td colspan=\"2\">Models</td><td/></tr><tr><td>\u2026</td><td>Distribution Ensemble</td><td>\u2026</td><td>Voting Mechanism</td></tr><tr><td>Inputs</td><td/><td/><td>GED framework</td><td>Post-processing</td><td>Final Outputs</td></tr><tr><td/><td>Seq2seq</td><td/><td/></tr><tr><td/><td>Models</td><td/><td/></tr><tr><td/><td>GECToR</td><td/><td/></tr><tr><td/><td>Models</td><td/><td/></tr><tr><td/><td>Candidate</td><td/><td/></tr><tr><td/><td>Generation</td><td/><td>GEC framework</td></tr></table>", |
| "text": "Example sentences with corresponding errors. Sequences in the bracket are the corresponding transliterations.", |
| "html": null, |
| "num": null |
| }, |
| "TABREF3": { |
| "type_str": "table", |
| "content": "<table/>", |
| "text": "Examples of BIO tags used in basic GED models. Sequences in the bracket are the corresponding transliterations.", |
| "html": null, |
| "num": null |
| }, |
| "TABREF5": { |
| "type_str": "table", |
| "content": "<table/>", |
| "text": "Example of fusion of results. Sequences in the bracket are the corresponding transliterations.", |
| "html": null, |
| "num": null |
| }, |
| "TABREF7": { |
| "type_str": "table", |
| "content": "<table/>", |
| "text": "Statistics information of the CGED data.", |
| "html": null, |
| "num": null |
| }, |
| "TABREF8": { |
| "type_str": "table", |
| "content": "<table><tr><td>shows the statis-</td></tr></table>", |
| "text": "", |
| "html": null, |
| "num": null |
| } |
| } |
| } |
| } |