{
"paper_id": "2022",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T03:12:19.142080Z"
},
"title": "Complex Word Identification in Vietnamese: Towards Vietnamese Text Simplification",
"authors": [
{
"first": "Phuong",
"middle": [],
"last": "Nguyen",
"suffix": "",
"affiliation": {},
"email": "phuong.nguyen@pomona.edu"
},
{
"first": "David",
"middle": [],
"last": "Kauchak",
"suffix": "",
"affiliation": {},
"email": "david.kauchak@pomona.edu"
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "Text Simplification has been an extensively researched problem in English, but has not been investigated in Vietnamese. We focus on the Vietnamese-specific Complex Word Identification task, often the first step in Lexical Simplification (Shardlow, 2013). We examine three different Vietnamese datasets constructed for other natural language processing tasks and show that, like in other languages, frequency is a strong signal in determining whether a word is complex, with a mean accuracy of 86.87%. Across the datasets, we find that the 10% most frequent words in many corpus can be labeled as simple, and the rest as complex, though this is more variable for smaller corpora. We also examine how human annotators perform at this task. Given the subjective nature, there is a fair amount of variability in which words are seen as difficult, though majority results are more consistent.",
"pdf_parse": {
"paper_id": "2022",
"_pdf_hash": "",
"abstract": [
{
"text": "Text Simplification has been an extensively researched problem in English, but has not been investigated in Vietnamese. We focus on the Vietnamese-specific Complex Word Identification task, often the first step in Lexical Simplification (Shardlow, 2013). We examine three different Vietnamese datasets constructed for other natural language processing tasks and show that, like in other languages, frequency is a strong signal in determining whether a word is complex, with a mean accuracy of 86.87%. Across the datasets, we find that the 10% most frequent words in many corpus can be labeled as simple, and the rest as complex, though this is more variable for smaller corpora. We also examine how human annotators perform at this task. Given the subjective nature, there is a fair amount of variability in which words are seen as difficult, though majority results are more consistent.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "Text Simplification is a task that focuses on improving the readability and understandability of text while preserving the original content and meaning. Text Simplification applications have been shown to benefit a variety of target audiences, including readers with low-literacy levels (Mason, 1978) , non-native speakers (Paetzold, 2016) , language learners (Gardner et al., 2007; Crossley et al., 2007) , deaf people (Marschark and Spencer, 2010) , people with reading comprehension problems such as aphasia (Carroll et al., 1998) and dyslexia (Rello et al., 2013) , and people with Autistic Spectrum Disorder (Evans et al., 2014) . It is also a useful preprocessing step for other NLP tasks, including parsing (Chandrasekar et al., 1996) , information extraction (Evans, 2011; Miwa et al., 2010) , and question generation (Heilman and Smith, 2010) .",
"cite_spans": [
{
"start": 287,
"end": 300,
"text": "(Mason, 1978)",
"ref_id": "BIBREF28"
},
{
"start": 323,
"end": 339,
"text": "(Paetzold, 2016)",
"ref_id": "BIBREF36"
},
{
"start": 360,
"end": 382,
"text": "(Gardner et al., 2007;",
"ref_id": "BIBREF15"
},
{
"start": 383,
"end": 405,
"text": "Crossley et al., 2007)",
"ref_id": "BIBREF9"
},
{
"start": 420,
"end": 449,
"text": "(Marschark and Spencer, 2010)",
"ref_id": "BIBREF26"
},
{
"start": 511,
"end": 533,
"text": "(Carroll et al., 1998)",
"ref_id": "BIBREF5"
},
{
"start": 547,
"end": 567,
"text": "(Rello et al., 2013)",
"ref_id": "BIBREF39"
},
{
"start": 613,
"end": 633,
"text": "(Evans et al., 2014)",
"ref_id": "BIBREF13"
},
{
"start": 714,
"end": 741,
"text": "(Chandrasekar et al., 1996)",
"ref_id": "BIBREF6"
},
{
"start": 767,
"end": 780,
"text": "(Evans, 2011;",
"ref_id": "BIBREF14"
},
{
"start": 781,
"end": 799,
"text": "Miwa et al., 2010)",
"ref_id": "BIBREF29"
},
{
"start": 826,
"end": 851,
"text": "(Heilman and Smith, 2010)",
"ref_id": "BIBREF17"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Although significant progress has been made in text simplification in multiple languages, including English (Coster and Kauchak, 2011; Nisioi et al., 2017; Woodsend and Lapata, 2011) , Spanish (Saggion et al., 2015; Bott et al., 2012) , Portuguese (Alu\u00edsio et al., 2008) , Japanese (Katsuta and Yamamoto, 2019; Maruyama and Yamamoto, 2017) , Korean (Chung et al., 2013) , and Italian (Barlacchi and Tonelli, 2013) , the problem remains a relatively new area of research in Vietnamese, a language spoken by over 70 million people (Van Driem, 2001) in Vietnam, the South East Asia region, France, Australia, and the United States. Sentence splitting has been conducted for the Vietnamese \u2212 English machine translation task (Hung et al., 2012) , which can be helpful as an initial step for Text Simplification, but no further work has been recorded.",
"cite_spans": [
{
"start": 108,
"end": 134,
"text": "(Coster and Kauchak, 2011;",
"ref_id": "BIBREF8"
},
{
"start": 135,
"end": 155,
"text": "Nisioi et al., 2017;",
"ref_id": "BIBREF34"
},
{
"start": 156,
"end": 182,
"text": "Woodsend and Lapata, 2011)",
"ref_id": "BIBREF49"
},
{
"start": 193,
"end": 215,
"text": "(Saggion et al., 2015;",
"ref_id": "BIBREF40"
},
{
"start": 216,
"end": 234,
"text": "Bott et al., 2012)",
"ref_id": "BIBREF4"
},
{
"start": 237,
"end": 270,
"text": "Portuguese (Alu\u00edsio et al., 2008)",
"ref_id": null
},
{
"start": 282,
"end": 310,
"text": "(Katsuta and Yamamoto, 2019;",
"ref_id": "BIBREF23"
},
{
"start": 311,
"end": 339,
"text": "Maruyama and Yamamoto, 2017)",
"ref_id": "BIBREF27"
},
{
"start": 349,
"end": 369,
"text": "(Chung et al., 2013)",
"ref_id": "BIBREF7"
},
{
"start": 384,
"end": 413,
"text": "(Barlacchi and Tonelli, 2013)",
"ref_id": "BIBREF3"
},
{
"start": 529,
"end": 546,
"text": "(Van Driem, 2001)",
"ref_id": "BIBREF47"
},
{
"start": 721,
"end": 740,
"text": "(Hung et al., 2012)",
"ref_id": "BIBREF20"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Other tasks in Vietnamese have been explored, from core problems such as dependency parsing, word segmentation, and part-of-speech parsing to more recent ones such as sentiment analysis, automatic speech recognition, and question answering. 1 Text Summarization is the most closely related task to Text Simplification that has been attempted in Vietnamese.",
"cite_spans": [
{
"start": 241,
"end": 242,
"text": "1",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Progress on the specific task of Complex Word Identification in Vietnamese has not been reported so far. Although the terms complex words and simple words have appeared in literature on the Word Segmentation task, such as in Nguyen et al. (2006b) , Nguyen et al. (2006a), and Anh et al. (2015) , they refer to the length of each word (whether they are monosyllabic or polysyllabic words such as compound and reduplicative words) rather than the understandability and readability of each word in the context of Text Simplification.",
"cite_spans": [
{
"start": 225,
"end": 246,
"text": "Nguyen et al. (2006b)",
"ref_id": "BIBREF32"
},
{
"start": 249,
"end": 275,
"text": "Nguyen et al. (2006a), and",
"ref_id": "BIBREF30"
},
{
"start": 276,
"end": 293,
"text": "Anh et al. (2015)",
"ref_id": "BIBREF2"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "We implement two approaches to solve the Complex Word Identification task in Vietnamese: frequency-based and classification-based with Support Vector Machines. We conclude with an experiment involving human annotators to predict the suitability of our datasets for this task.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The characteristics presented in this section are extracted from H\u1ea1o (2000) and H\u1eefu et al. (1998) .",
"cite_spans": [
{
"start": 65,
"end": 75,
"text": "H\u1ea1o (2000)",
"ref_id": "BIBREF21"
},
{
"start": 80,
"end": 97,
"text": "H\u1eefu et al. (1998)",
"ref_id": "BIBREF22"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Characteristics of Vietnamese",
"sec_num": "2"
},
{
"text": "Vietnamese is classified to be in the VietMuong group of the Mon-Khmer branch in the Austro-Asiatic language family.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Language Family",
"sec_num": "2.1"
},
{
"text": "Due to past colonization periods, Vietnamese is also heavily influenced by Chinese, as exemplified by the significant number of Sino-Vietnamese words (words with Chinese origin or consists of morphemes of Chinese origin) in the vocabulary, French, as seen in the use of calque (or loan translation), and English.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Language Family",
"sec_num": "2.1"
},
{
"text": "Vietnamese is an isolating and tonal language with the following characteristics:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Language Type",
"sec_num": "2.2"
},
{
"text": "\u2022 It uses a Latin alphabet in conjunction with diacritics and several other letters.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Language Type",
"sec_num": "2.2"
},
{
"text": "\u2022 There are six tones marked by accents: level (\"ngang\"), falling (\"huy\u1ec1n\"), broken (\"ng\u00e3\"), curve (\"h\u1ecfi\"), rising (\"s\u1eafc\"), and drop (\"n\u1eb7ng\"). The pronunciation of these tones differ across the Northern, Southern and Central regions of Vietnam (Alves, 1995) .",
"cite_spans": [
{
"start": 244,
"end": 257,
"text": "(Alves, 1995)",
"ref_id": "BIBREF1"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Language Type",
"sec_num": "2.2"
},
{
"text": "\u2022 It is a monosyllabic language.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Language Type",
"sec_num": "2.2"
},
{
"text": "\u2022 It is neither inflected nor conjugated, i.e. all words in Vietnamese are immutable.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Language Type",
"sec_num": "2.2"
},
{
"text": "\u2022 All grammatical relations are established by word order and function words.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Language Type",
"sec_num": "2.2"
},
{
"text": "Vietnamese has a unit denoted \"ti\u1ebfng\" that can represent either (Nguy\u1ec5n et al., 2006 ):",
"cite_spans": [
{
"start": 64,
"end": 84,
"text": "(Nguy\u1ec5n et al., 2006",
"ref_id": "BIBREF33"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "A Word Unit",
"sec_num": "2.3"
},
{
"text": "1. a syllable with regards to phonology 2. a morpheme with regards to morpho-syntax",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "A Word Unit",
"sec_num": "2.3"
},
{
"text": "Based on current literature, this unit is commonly referred to as a syllable. Thus, the Vietnamese vocabulary includes monosyllabic words (\"t\u1eeb \u0111\u01a1n\", words with a single syllable) or compound words (\"t\u1eeb ph\u1ee9c\", words with more than one syllable). About 85% of Vietnamese words are compound words and more than 80% of syllables are standalone words Dinh et al., 2008) . This means that unlike in English and other Occidental languages that also utilize Latin alphabets, white spaces are not reliable indicators of word boundaries in Vietnamese. For example, \"h\u1ecdc sinh\" (student) is a compound word that includes two syllables separated by a white space.",
"cite_spans": [
{
"start": 346,
"end": 364,
"text": "Dinh et al., 2008)",
"ref_id": "BIBREF12"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "a word with regards to sentence constituent creation",
"sec_num": "3."
},
{
"text": "We conduct two experiments across three Vietnamese corpora of various sizes extracted from different domains. We obtain a simple word list, a stopword list, and use the two lists to extract three complex word lists from the three corpora for evaluation purposes. The simple and complex wordlists for the three corpora are available online. 2",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Data",
"sec_num": "3"
},
{
"text": "The following two word lists are used:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Word Lists",
"sec_num": "3.1"
},
{
"text": "\u2022 Simple Word List: A list of 3,000 words obtained by Luong et al. (2018) to construct a Vietnamese text readability formula. The list was used to replace the list of 3,000 words that fourth grade students can understand used in the Dale-Chall formula for English readability (Dawkins et al., 1956) in the development of an equivalent readability formula in Vietnamese.",
"cite_spans": [
{
"start": 54,
"end": 73,
"text": "Luong et al. (2018)",
"ref_id": "BIBREF24"
},
{
"start": 276,
"end": 298,
"text": "(Dawkins et al., 1956)",
"ref_id": "BIBREF10"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Word Lists",
"sec_num": "3.1"
},
{
"text": "\u2022 Stopword List: A list of 1942 stop words. 3",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Word Lists",
"sec_num": "3.1"
},
{
"text": "The following three corpora are used to conduct experiments. They are named according to the purpose of their construction.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Corpora",
"sec_num": "3.2"
},
{
"text": "\u2022 READABILITY (Luong et al., 2020) This corpus, constructed for research in Vietnamese text readability, contains 1,825 documents of approximately 3 million words in the literature domain. The documents were sourced from college-level textbooks, stories and literature websites, and were preprocessed for the minimization of spelling errors and standardization of punctuation, encoding, and tone. The corpus was then divided by experts into four categories: Very Easy (intended for children or people with middle-school education), Easy (intended for middle-school children or people with middle-school education), Medium (intended for high-school students or people with high-school education), and Difficult (specialized text intended for people with college education). Based on the Vietnamese Dictionary by Hoang 2017, more difficult groups of texts are more likely to include Sino-Vietnamese words and other words borrowed from English and French.",
"cite_spans": [
{
"start": 14,
"end": 34,
"text": "(Luong et al., 2020)",
"ref_id": "BIBREF25"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Corpora",
"sec_num": "3.2"
},
{
"text": "For this work we only use the Difficult subcorpus.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Corpora",
"sec_num": "3.2"
},
{
"text": "\u2022 CLUSTER (Tran et al., 2020) This dataset was constructed for the task of abstractive multi-document summarization. The dataset includes 600 summaries of 300 clusters with 1,945 news articles on five topics: world news, domestic news, business, entertainment and sports extracted from various news outlets aggregated by Google News in Vietnamese. Every cluster contains 4 -10 articles, and the average number of articles per cluster is 6. Each document contains the following information: the title, the text content, the news source, the date of publication, the author(s), the tag(s), and the headline summary. These pieces of information are labelled using English.",
"cite_spans": [
{
"start": 10,
"end": 29,
"text": "(Tran et al., 2020)",
"ref_id": "BIBREF46"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Corpora",
"sec_num": "3.2"
},
{
"text": "For this work we only use the original documents.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Corpora",
"sec_num": "3.2"
},
{
"text": "\u2022 CLASSIFICATION (Hoang et al., 2007) This corpus was constructed to solve the Text Classification task (labeling documents with a predefined topic). The corpus was comprised of articles from four major online newspapers, including VnExpress, TuoiTre Online, Thanh Nien Online, and Nguoi Lao Dong online. The data preprocessing phase included the removal of HTML tags, normalization of spelling, and other heuristics. There are 27 predefined topics ranging from music, family, and eating and drinking, to international business, new computer products and fine arts.",
"cite_spans": [
{
"start": 17,
"end": 37,
"text": "(Hoang et al., 2007)",
"ref_id": "BIBREF19"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Corpora",
"sec_num": "3.2"
},
{
"text": "The authors constructed 2 corpora of 2 levels of topic specificity (the higher level one included more fine-grained topic categorization). Corpus level 2 is used in this project.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Corpora",
"sec_num": "3.2"
},
{
"text": "Since whitespace cannot be used to identify words in Vietnamese, we use the VNCoreNLP toolkit (Vu et al., 2018) for the word segmentation process. The word segmentation tool in the toolkit relies on the use of the Single Classification Ripple Down Rules (SCRDR) tree and was reported to achieve the best F1 score out of notable segmenters including vnTokenizer, JVnSegmenter, and DongDu (Nguyen et al., 2017) . We extract three complex word lists from the three corpora by removing all of the simple words, stopwords, proper nouns (words whose syllables are all capitalized), invalid words (such as words that contain numbers, letters, hyperlinks, and English words that are used repeatedly). The syllables in each word are concatenated with \"_\" as white spaces are not reliable indicators of word boundaries in Vietnamese. The remaining words are then identified as complex. Table 1 shows various statistics for the three corpora. The Readability corpus has the smallest number of documents, but the documents tend to be longer. The Cluster corpus is the smallest of the three corpus with just over half a millions words. The Classification corpus is the largest, both in the number of documents and the number of words. These sizes are paralleled in the number of unique words from each corpora, though the Cluster corpus is high given its size indicating a slightly more difficult corpus. All of the corpora are comprised of about 60% simple words, though, again the Cluster corpus is slightly smaller than this.",
"cite_spans": [
{
"start": 387,
"end": 408,
"text": "(Nguyen et al., 2017)",
"ref_id": "BIBREF31"
}
],
"ref_spans": [
{
"start": 876,
"end": 883,
"text": "Table 1",
"ref_id": "TABREF1"
}
],
"eq_spans": [],
"section": "Data Preprocessing",
"sec_num": "3.3"
},
{
"text": "For the experiments, we rely on the simple word list, and the 3 complex word lists as extracted above. We concatenate the simple word list with each of the 3 complex word lists to create 3 three separate datasets. These word lists will be referred to by their corpus' name in the following sections.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Data Preprocessing",
"sec_num": "3.3"
},
{
"text": "For each dataset, we have the simple word list and the list of unique complex words. This creates three complex word identification tasks to identify whether a word is simple or complex. We examine two approaches for the this task: frequency threshold and feature-based using Support Vector Machines. ",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Methods",
"sec_num": "4"
},
{
"text": "For the Complex Word Identification task in English, frequency is an overpowering signal in determining whether a word is complex (Paetzold and Specia, 2016) . The frequency approach only uses the frequency of a word in a particular corpus to label it as complex or simple.",
"cite_spans": [
{
"start": 130,
"end": 157,
"text": "(Paetzold and Specia, 2016)",
"ref_id": "BIBREF35"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Frequency Threshold",
"sec_num": "4.1"
},
{
"text": "For each of the three datasets that include both simple and complex words, we split it into training (75%) and testing (25%) data. Within the training dataset, we sort all of the words by frequency, and consider each frequency f out of all frequencies recorded as a cutoff point. For each frequency f , a word will be labelled complex if its frequency is smaller than or equal to f , and it will be labelled simple otherwise. We consider all possible frequencies f as the cutoff point and and identify the frequency that has the highest classification accuracy as our threshold for applying to the testing data.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Frequency Threshold",
"sec_num": "4.1"
},
{
"text": "The frequency approach only utilizes a single feature. Many features have been suggested for use in the complex word identification task (Paetzold and Specia, 2016) . For our classifier we used four features: corpus-specific frequency, number of syllables, number of characters, and number of characters and diacritics. All of the features besides word length try and capture different notions of word length. Some of these have worked well in other languages and some of these are specifically available in Vietnamese (i.e., diacritics).",
"cite_spans": [
{
"start": 137,
"end": 164,
"text": "(Paetzold and Specia, 2016)",
"ref_id": "BIBREF35"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Support Vector Machines Classifier",
"sec_num": "4.2"
},
{
"text": "The number of syllables is calculated based on the number of underscores found in a word. Be-cause white spaces are not reliable indicators of word boundaries in Vietnamese, we concatenate the syllables of one word together with underscores in the data preprocessing step.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Support Vector Machines Classifier",
"sec_num": "4.2"
},
{
"text": "The number of characters and diacritics are calculated as the length of the word after being normalized into NFD (Normal Form D, also known as canonical decomposition) 4 with the unicodedata Python module. 5 We used the scikit-learn package (Pedregosa et al., 2011) with the default regularization parameter C = 1 and the radial basis function kernel.",
"cite_spans": [
{
"start": 206,
"end": 207,
"text": "5",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Support Vector Machines Classifier",
"sec_num": "4.2"
},
{
"text": "We evaluate the performance of the two approaches on the three corpora based on overall accuracy and precision, recall, and F1 (for identifying simple words).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Experiments",
"sec_num": "5"
},
{
"text": "Frequency has been shown to be a strong signal in the CWI process. Figure 1 shows the frequency distribution of the three datasets. As expected, all three follow the standard Zipf's like distribution with a small number of words occurring very frequently and most of the words only occuring a small number of times. Table 2 shows the accuracy, precision, recall and F1 scores. Overall, the approach does quite well with accuracies above 80% on all three corpora. The recall is high, highlighting that the approach is particularly good at identifying simple words. The results are significantly higher across all metrics on the Classification corpus. This is the corpus with the most data, and all documents represent news articles, which may have helped with consistency both because of source as well as writing practices. Table 3 shows the cutoff frequencies and cutoff percentiles (if the words have frequencies below the percentile, then they are complex words). While cutoff itself varies significantly (mostly due to the size of the corpus), the percentage this frequency represents is much more consistent. For the two larger corpora, Readability and Classification, there is only a one percentage point difference: the top 10% most frequent words are the simple words. The Cluster dataset has a lower frequency cutoff. We hypothesize this may have to do with its small size, though the source of the corpus might also play a role. More investigation is needed. Figure 2 shows the accuracy distributions across possible cutoff frequencies for the three datasets. The pattern is consistent across the three datasets. The classification accuracy reaches a peak very quickly and then tends to taper off. The accuracy slightly drops and hits a plateau, except in the case of the Classification dataset in which the accuracy remains very high beyond the peak accuracy point. Table 4 shows the accuracy, precision, recall and F1 number for the feature-based SVM approach. 
The SVM approach tends to have slightly higher recall than the threshold approach, but the other met- rics are not significantly different. The additional features may provide some small information, but the SVM is still heavily relying on the frequency feature to make its prediction.",
"cite_spans": [],
"ref_spans": [
{
"start": 67,
"end": 75,
"text": "Figure 1",
"ref_id": "FIGREF0"
},
{
"start": 316,
"end": 323,
"text": "Table 2",
"ref_id": "TABREF3"
},
{
"start": 824,
"end": 831,
"text": "Table 3",
"ref_id": "TABREF4"
},
{
"start": 1469,
"end": 1477,
"text": "Figure 2",
"ref_id": "FIGREF1"
},
{
"start": 1877,
"end": 1884,
"text": "Table 4",
"ref_id": "TABREF6"
}
],
"eq_spans": [],
"section": "Frequency Threshold",
"sec_num": "5.1"
},
{
"text": "To quantify the quality of the datasets for the automated CWI task in Vietnamese, three participants were asked to manually classify 199 words as simple or complex, with 100 words randomly picked from the simple words list and 99 words from the Readability complex word list. The words were presented by themselves without any additional context. All participants were native Vietnamese speakers pursuing a college degree in the United States. The instructions were provided in Vietnamese, in which an example of one simple word and one complex word is demonstrated. The participants were reassured that there are no right or wrong answers, encouraged to use their intuition when making the decision, and to label a word as complex when in doubt. Results are reported under two circumstances: a word gets assigned a label during this collective classification process if (a) the label is chosen by all 3 of the participants and (b) the label is chosen by a majority (i.e., 2 out of 3) participants. Table 5 shows the results for the humans annotators. There is a drastic increase across all of the metrics when we remove the restriction that all annotators need to agree on a label. Accuracy increases two-fold from around 43% to 82%, and precision rises to 100%, meaning no simple words are mislabelled. Recall nearly reaches 75%, which reflects a decent level of agreement between the annotators' idea of complexity and what is represented in the Readability dataset. However, both between annotators as well as between the task construction, there is still some contention about which words are simple and complex. This highlights the difficulty and the subjectivity of this task.",
"cite_spans": [],
"ref_spans": [
{
"start": 999,
"end": 1006,
"text": "Table 5",
"ref_id": "TABREF7"
}
],
"eq_spans": [],
"section": "Human Annotation",
"sec_num": "6"
},
{
"text": "Frequency is an overpowering signal in determining whether a word is complex or simple as shown by the accuracy, precision, recall and F1 scores of the Frequency Threshold experiment, which are all are greater than 0.8 (see Table 2 ). Recall scores are all greater than 0.9 across the three datasets, indicating that this approach can reliably identify complex words. This finding is consistent with the results obtained from the Complex Word Classification task in English (Paetzold and Specia, 2016) .",
"cite_spans": [
{
"start": 474,
"end": 501,
"text": "(Paetzold and Specia, 2016)",
"ref_id": "BIBREF35"
}
],
"ref_spans": [
{
"start": 224,
"end": 231,
"text": "Table 2",
"ref_id": "TABREF3"
}
],
"eq_spans": [],
"section": "Discussion",
"sec_num": "7"
},
{
"text": "We analyze three corpora to try understand how consistent frequency is. For the larger corpora, it is surprisingly consistent with words in the top 10% most frequent words as simple. For smaller corpora this is more varied.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion",
"sec_num": "7"
},
{
"text": "There are some shortcomings in the datasets that may affect the performance. There exist words in the simple word list that are acronyms that may be obvious to a certain target audience but not for the majority of Vietnamese readers (such as \"UBND\", which stands for \"U\u1ef7 ban nh\u00e2n d\u00e2n\" (people's committee)), and can mean different things in different contexts (such as TP, which can mean \"th\u00e0nh ph\u1ed1\" (city) or \"th\u00e0nh ph\u1ea7n\" (ingredient)). The Cluster and Classification datasets also involve foreign words, especially English words, that can add noise to the data.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion",
"sec_num": "7"
},
{
"text": "Support Vector Machines are also explored to incorporate additional information into the prediction task. Three more features are added in addition to frequency for the SVM model: number of syllables, number of characters, and number of characters and diacritics. We hypothesize that longer words and words with more diacritics will be harder to recognize and understand. For example, \"c\u1ecf c\u00e2y\" (trees and plants) can be perceived as a simpler word to understand than \"\u0111\u01b0\u1eddng s\u00e1\" (streets). However, results show that using SVM with more features do not improve the performance of the classification task compared to using a frequency threshold. In fact, we observe a decline in precision (from 92.40% to 81.95%) and F1 score (from 94.73% to 89.39%) on the Readability dataset. This can be explained by the fact that surface-level word features do not necessarily make the word more complex in terms of readability and understandability. Coming back to our example, although the former word \"c\u1ecf c\u00e2y\" is shorter and has fewer diacritics, it can also be simpler because both words have clear meanings (\"c\u1ecf\" -grass and \"c\u00e2y\" -plant), while the second syllable of the latter word \"\u0111\u01b0\u1eddng s\u00e1\" is a Sino-Vietnamese word that may not be clearly decipherable. Because of this reason, \"trung ki\u00ean\" (loyal), which is a Sino-Vietnamese word, can be viewed as more complex than \"ph\u01b0\u01a1ng h\u01b0\u1edbng\" (direction), which is a more common word. Again, this particular example shows that frequency gives a very strong signal.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion",
"sec_num": "7"
},
{
"text": "The Human Annotation experiment shows a great difference between labeling based on the agreement between all three annotators or between the majority of annotators (2 out of 3 annotators). The accuracy and recall scores nearly double, and the precision score is 1.0 for the majority vote. This means that the majority of annotators' labeling of complex words is consistent with the data we obtain, which can indicate the suitability of the Readability dataset for the CWI training purposes.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion",
"sec_num": "7"
},
{
"text": "Several next steps can be taken beyond this project: More Salient Features: Features that describe a word's characteristics beyond its pronunciation can be helpful to obtain a better classification performance. Some examples include sense count (number of entries in a dictionary for example), synonym count, and word type (whether the word is loan word).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusions and Future Work",
"sec_num": "8"
},
{
"text": "The approach we explore predicts words as simple/complex regardless of their context. In some cases, the context information can help provide additional information and additional features to help the identification (Paetzold and Specia, 2016) .",
"cite_spans": [
{
"start": 216,
"end": 243,
"text": "(Paetzold and Specia, 2016)",
"ref_id": "BIBREF35"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Context:",
"sec_num": null
},
{
"text": "More Diverse Human Annotators: Developing a clear definition of \"word simplicity\" and \"word complexity\" that reflects the needs of specific audiences by creating a bigger and more diverse pool of annotators with regards to gender, education background, and income level can also be helpful in constructing models that personalize text simplification for readers from different groups.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Context:",
"sec_num": null
},
{
"text": "Text Simplification is the process of reducing the syntactical and lexical complexity of original text to make it more readable and understandable. Although this task has been shown to benefit various groups of audience and has been researched and experimented with extensively in English and several other languages, there has not been considerable progress made in Vietnamese-specific Text Simplification. In this study, we focus on the Complex Word Identification step in the Lexical Simplification pipeline, one approach to solve the Text Simplification problem. We view the question as a binary classification task, and conduct three experiments Frequency Threshold, Support Vector Machines, and Human Annotation to identify important features in the classification process and investigate the quality of our datasets for this particular purpose.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Context:",
"sec_num": null
},
{
"text": "We observe that frequency is a very strong signal in the Complex Word Identification process in Vietnamese, shown by the Frequency Threshold experiment where we achieve a mean accuracy of 86.87% across our three datasets. The consistency of results across the three datasets give us a general rule to identify complex words in any corpus: the 10-20% of most frequent words are likely to be simple words. The use of Support Vector Machines with surface-level word features such as number of syllables and number of characters only marginally improves the recall scores but makes no significant difference in terms of accuracy, precision, and F1 scores. The Human Annotation experiment demonstrates how with a small number of annotators and a small sample, we can quantify how one dataset aligns with the definition of word complexity of college-educated native Vietnamese speakers. Considering the absence of significant progress on the Vietnamese-specific Text Simplification task and specifically the Complex Word Identification question, these three experiments constitute a first step in the exploration of the Lexical Simplification pipeline for Vietnamese.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Context:",
"sec_num": null
},
{
"text": "https://github.com/undertheseanlp/ NLP-Vietnamese-progress",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "https://github.com/phuongnguyen00/ cwi-in-vietnamese 3 https://github.com/stopwords/ vietnamese-stopwords",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "This method does not account for the diacritic found in the letter \"\u0111\", but accounts for all other diacritics.5 https://docs.python.org/3/library/ unicodedata.html",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "Towards brazilian portuguese automatic text simplification systems",
"authors": [
{
"first": "Lucia",
"middle": [],
"last": "Sandra M Alu\u00edsio",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Specia",
"suffix": ""
},
{
"first": "A",
"middle": [
"S"
],
"last": "Thiago",
"suffix": ""
},
{
"first": "Erick",
"middle": [
"G"
],
"last": "Pardo",
"suffix": ""
},
{
"first": "Renata Pm",
"middle": [],
"last": "Maziero",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Fortes",
"suffix": ""
}
],
"year": 2008,
"venue": "Proceedings of the eighth ACM symposium on Document engineering",
"volume": "",
"issue": "",
"pages": "240--248",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Sandra M Alu\u00edsio, Lucia Specia, Thiago AS Pardo, Erick G Maziero, and Renata PM Fortes. 2008. To- wards brazilian portuguese automatic text simplifi- cation systems. In Proceedings of the eighth ACM symposium on Document engineering, pages 240- 248.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "Tonal features and the development of vietnamese tones",
"authors": [
{
"first": "Mark",
"middle": [],
"last": "Alves",
"suffix": ""
}
],
"year": 1995,
"venue": "Working Papers in Linguistics",
"volume": "27",
"issue": "",
"pages": "1--13",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Mark Alves. 1995. Tonal features and the development of vietnamese tones. Working Papers in Linguistics, 27:1-13.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "Identifying reduplicative words for vietnamese word segmentation",
"authors": [
{
"first": "Anh",
"middle": [],
"last": "Tran Ngoc",
"suffix": ""
},
{
"first": "Phuong",
"middle": [],
"last": "Nguyen",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Thai",
"suffix": ""
},
{
"first": "Thanh",
"middle": [],
"last": "Dao",
"suffix": ""
},
{
"first": "Nguyen Hong",
"middle": [],
"last": "Tinh",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Quan",
"suffix": ""
}
],
"year": 2015,
"venue": "The 2015 IEEE RIVF International Conference on Computing & Communication Technologies-Research, Innovation, and Vision for Future (RIVF)",
"volume": "",
"issue": "",
"pages": "77--82",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Tran Ngoc Anh, Nguyen Phuong Thai, Dao Thanh Tinh, and Nguyen Hong Quan. 2015. Identifying reduplicative words for vietnamese word segmenta- tion. In The 2015 IEEE RIVF International Confer- ence on Computing & Communication Technologies- Research, Innovation, and Vision for Future (RIVF), pages 77-82. IEEE.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "Ernesta: A sentence simplification tool for children's stories in italian",
"authors": [
{
"first": "Gianni",
"middle": [],
"last": "Barlacchi",
"suffix": ""
},
{
"first": "Sara",
"middle": [],
"last": "Tonelli",
"suffix": ""
}
],
"year": 2013,
"venue": "International Conference on Intelligent Text Processing and Computational Linguistics",
"volume": "",
"issue": "",
"pages": "476--487",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Gianni Barlacchi and Sara Tonelli. 2013. Ernesta: A sentence simplification tool for children's stories in italian. In International Conference on Intelli- gent Text Processing and Computational Linguistics, pages 476-487. Springer.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "Text simplification tools for spanish",
"authors": [
{
"first": "Stefan",
"middle": [],
"last": "Bott",
"suffix": ""
},
{
"first": "Horacio",
"middle": [],
"last": "Saggion",
"suffix": ""
},
{
"first": "Simon",
"middle": [],
"last": "Mille",
"suffix": ""
}
],
"year": 2012,
"venue": "Proceedings of the Eighth International Conference on Language Resources and Evaluation (LREC'12)",
"volume": "",
"issue": "",
"pages": "1665--1671",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Stefan Bott, Horacio Saggion, and Simon Mille. 2012. Text simplification tools for spanish. In Proceedings of the Eighth International Conference on Language Resources and Evaluation (LREC'12), pages 1665- 1671.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "Practical simplification of english newspaper text to assist aphasic readers",
"authors": [
{
"first": "John",
"middle": [],
"last": "Carroll",
"suffix": ""
},
{
"first": "Guido",
"middle": [],
"last": "Minnen",
"suffix": ""
},
{
"first": "Yvonne",
"middle": [],
"last": "Canning",
"suffix": ""
},
{
"first": "Siobhan",
"middle": [],
"last": "Devlin",
"suffix": ""
},
{
"first": "John",
"middle": [],
"last": "Tait",
"suffix": ""
}
],
"year": 1998,
"venue": "Proceedings of the AAAI-98 Workshop on Integrating Artificial Intelligence and Assistive Technology",
"volume": "",
"issue": "",
"pages": "7--10",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "John Carroll, Guido Minnen, Yvonne Canning, Siobhan Devlin, and John Tait. 1998. Practical simplification of english newspaper text to assist aphasic readers. In Proceedings of the AAAI-98 Workshop on Integrat- ing Artificial Intelligence and Assistive Technology, pages 7-10. Citeseer.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Motivations and methods for text simplification",
"authors": [
{
"first": "Raman",
"middle": [],
"last": "Chandrasekar",
"suffix": ""
},
{
"first": "Christine",
"middle": [],
"last": "Doran",
"suffix": ""
},
{
"first": "Srinivas",
"middle": [],
"last": "Bangalore",
"suffix": ""
}
],
"year": 1996,
"venue": "The 16th International Conference on Computational Linguistics",
"volume": "2",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Raman Chandrasekar, Christine Doran, and Srinivas Bangalore. 1996. Motivations and methods for text simplification. In COLING 1996 Volume 2: The 16th International Conference on Computational Linguis- tics.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "Enhancing readability of web documents by text augmentation for deaf people",
"authors": [
{
"first": "Jin-Woo",
"middle": [],
"last": "Chung",
"suffix": ""
},
{
"first": "Hye-Jin",
"middle": [],
"last": "Min",
"suffix": ""
},
{
"first": "Joonyeob",
"middle": [],
"last": "Kim",
"suffix": ""
},
{
"first": "Jong C",
"middle": [],
"last": "Park",
"suffix": ""
}
],
"year": 2013,
"venue": "Proceedings of the 3rd International Conference on Web Intelligence, Mining and Semantics",
"volume": "",
"issue": "",
"pages": "1--10",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jin-Woo Chung, Hye-Jin Min, Joonyeob Kim, and Jong C Park. 2013. Enhancing readability of web documents by text augmentation for deaf people. In Proceedings of the 3rd International Conference on Web Intelligence, Mining and Semantics, pages 1-10.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Learning to simplify sentences using wikipedia",
"authors": [
{
"first": "William",
"middle": [],
"last": "Coster",
"suffix": ""
},
{
"first": "David",
"middle": [],
"last": "Kauchak",
"suffix": ""
}
],
"year": 2011,
"venue": "Proceedings of the workshop on monolingual text-to-text generation",
"volume": "",
"issue": "",
"pages": "1--9",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "William Coster and David Kauchak. 2011. Learning to simplify sentences using wikipedia. In Proceedings of the workshop on monolingual text-to-text genera- tion, pages 1-9.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "A linguistic analysis of simplified and authentic texts",
"authors": [
{
"first": "A",
"middle": [],
"last": "Scott",
"suffix": ""
},
{
"first": "Max",
"middle": [
"M"
],
"last": "Crossley",
"suffix": ""
},
{
"first": "Philip",
"middle": [
"M"
],
"last": "Louwerse",
"suffix": ""
},
{
"first": "Danielle",
"middle": [
"S"
],
"last": "Mc-Carthy",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Mcnamara",
"suffix": ""
}
],
"year": 2007,
"venue": "The Modern Language Journal",
"volume": "91",
"issue": "1",
"pages": "15--30",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Scott A Crossley, Max M Louwerse, Philip M Mc- Carthy, and Danielle S McNamara. 2007. A lin- guistic analysis of simplified and authentic texts. The Modern Language Journal, 91(1):15-30.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "A reconsideration of the dale-chall formula",
"authors": [
{
"first": "John",
"middle": [],
"last": "Dawkins",
"suffix": ""
},
{
"first": "Edgar",
"middle": [],
"last": "Dale",
"suffix": ""
},
{
"first": "Jeanne",
"middle": [
"S"
],
"last": "Chall",
"suffix": ""
}
],
"year": 1956,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "John Dawkins, Edgar Dale, and Jeanne S Chall. 1956. A reconsideration of the dale-chall formula [with reply].",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "Word segmentation of vietnamese texts: a comparison of approaches",
"authors": [
{
"first": "Hong",
"middle": [
"Phuong"
],
"last": "Quang Thang Dinh",
"suffix": ""
},
{
"first": "Thi",
"middle": [],
"last": "Le",
"suffix": ""
},
{
"first": "Cam",
"middle": [
"Tu"
],
"last": "Minh Huyen Nguyen",
"suffix": ""
},
{
"first": "Mathias",
"middle": [],
"last": "Nguyen",
"suffix": ""
},
{
"first": "Xuan",
"middle": [
"Luong"
],
"last": "Rossignol",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Vu",
"suffix": ""
}
],
"year": 2008,
"venue": "6th international conference on Language Resources and Evaluation-LREC",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Quang Thang Dinh, Hong Phuong Le, Thi Minh Huyen Nguyen, Cam Tu Nguyen, Mathias Rossignol, and Xuan Luong Vu. 2008. Word segmentation of viet- namese texts: a comparison of approaches. In 6th international conference on Language Resources and Evaluation-LREC 2008.",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "An evaluation of syntactic simplification rules for people with autism",
"authors": [
{
"first": "Richard",
"middle": [],
"last": "Evans",
"suffix": ""
},
{
"first": "Constantin",
"middle": [],
"last": "Orasan",
"suffix": ""
},
{
"first": "Iustin",
"middle": [],
"last": "Dornescu",
"suffix": ""
}
],
"year": 2014,
"venue": "Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Richard Evans, Constantin Orasan, and Iustin Dornescu. 2014. An evaluation of syntactic simplification rules for people with autism. Association for Computa- tional Linguistics.",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "Comparing methods for the syntactic simplification of sentences in information extraction. Literary and linguistic computing",
"authors": [
{
"first": "J",
"middle": [],
"last": "Richard",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Evans",
"suffix": ""
}
],
"year": 2011,
"venue": "",
"volume": "26",
"issue": "",
"pages": "371--388",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Richard J Evans. 2011. Comparing methods for the syntactic simplification of sentences in informa- tion extraction. Literary and linguistic computing, 26(4):371-388.",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "Effects of lexical simplification during unaided reading of english informational texts",
"authors": [
{
"first": "Elizabeth C Dee",
"middle": [],
"last": "Gardner",
"suffix": ""
}
],
"year": 2007,
"venue": "TESL Reporter",
"volume": "40",
"issue": "",
"pages": "33--33",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Elizabeth C Dee Gardner et al. 2007. Effects of lexi- cal simplification during unaided reading of english informational texts. TESL Reporter, 40:33-33.",
"links": null
},
"BIBREF16": {
"ref_id": "b16",
"title": "Source sentence simplification for statistical machine translation",
"authors": [
{
"first": "Eva",
"middle": [],
"last": "Hasler",
"suffix": ""
},
{
"first": "Adri\u00e0",
"middle": [],
"last": "De Gispert",
"suffix": ""
},
{
"first": "Felix",
"middle": [],
"last": "Stahlberg",
"suffix": ""
},
{
"first": "Aurelien",
"middle": [],
"last": "Waite",
"suffix": ""
},
{
"first": "Bill",
"middle": [],
"last": "Byrne",
"suffix": ""
}
],
"year": 2017,
"venue": "Computer Speech & Language",
"volume": "45",
"issue": "",
"pages": "221--235",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Eva Hasler, Adri\u00e0 de Gispert, Felix Stahlberg, Aurelien Waite, and Bill Byrne. 2017. Source sentence simpli- fication for statistical machine translation. Computer Speech & Language, 45:221-235.",
"links": null
},
"BIBREF17": {
"ref_id": "b17",
"title": "Good question! statistical ranking for question generation",
"authors": [
{
"first": "Michael",
"middle": [],
"last": "Heilman",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Noah",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Smith",
"suffix": ""
}
],
"year": 2010,
"venue": "Human Language Technologies: The 2010 Annual Conference of the North American Chapter of the Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "609--617",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Michael Heilman and Noah A Smith. 2010. Good ques- tion! statistical ranking for question generation. In Human Language Technologies: The 2010 Annual Conference of the North American Chapter of the As- sociation for Computational Linguistics, pages 609- 617.",
"links": null
},
"BIBREF18": {
"ref_id": "b18",
"title": "T\u1eeb \u0111i\u1ec3n Ti\u1ebfng Vi\u1ec7t (Vietnamese Dictionary)",
"authors": [
{
"first": "Phe",
"middle": [],
"last": "Hoang",
"suffix": ""
}
],
"year": 2017,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Phe Hoang. 2017. T\u1eeb \u0111i\u1ec3n Ti\u1ebfng Vi\u1ec7t (Vietnamese Dic- tionary). Da Nang Publising House.",
"links": null
},
"BIBREF19": {
"ref_id": "b19",
"title": "A comparative study on vietnamese text classification methods",
"authors": [
{
"first": "Duy",
"middle": [],
"last": "Vu Cong",
"suffix": ""
},
{
"first": "Dien",
"middle": [],
"last": "Hoang",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Dinh",
"suffix": ""
},
{
"first": "Hung",
"middle": [
"Quoc"
],
"last": "Nguyen Le Nguyen",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Ngo",
"suffix": ""
}
],
"year": 2007,
"venue": "2007 IEEE international conference on research, innovation and vision for the future",
"volume": "",
"issue": "",
"pages": "267--273",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Vu Cong Duy Hoang, Dien Dinh, Nguyen Le Nguyen, and Hung Quoc Ngo. 2007. A comparative study on vietnamese text classification methods. In 2007 IEEE international conference on research, innova- tion and vision for the future, pages 267-273. IEEE.",
"links": null
},
"BIBREF20": {
"ref_id": "b20",
"title": "Sentence splitting for vietnamese-english machine translation",
"authors": [
{
"first": "Thanh",
"middle": [],
"last": "Bui",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Hung",
"suffix": ""
},
{
"first": "Akira",
"middle": [],
"last": "Nguyen Le Minh",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Shimazu",
"suffix": ""
}
],
"year": 2012,
"venue": "2012 Fourth International Conference on Knowledge and Systems Engineering",
"volume": "",
"issue": "",
"pages": "156--160",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Bui Thanh Hung, Nguyen Le Minh, and Akira Shimazu. 2012. Sentence splitting for vietnamese-english machine translation. In 2012 Fourth International Conference on Knowledge and Systems Engineering, pages 156-160. IEEE.",
"links": null
},
"BIBREF21": {
"ref_id": "b21",
"title": "Ti\u1ebfng vi\u1ec7t-m\u1ea5y v\u1ea5n \u0111\u1ec1 ng\u1eef \u00e2m, ng\u1eef ph\u00e1p, ng\u1eef ngh\u0129a (vietnamese-some questions on phonetics, syntax and semantics). NXB Gi\u00e1o d\u1ee5c",
"authors": [
{
"first": "H\u1ea1o",
"middle": [],
"last": "Cao Xu\u00e2n",
"suffix": ""
}
],
"year": 2000,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Cao Xu\u00e2n H\u1ea1o. 2000. Ti\u1ebfng vi\u1ec7t-m\u1ea5y v\u1ea5n \u0111\u1ec1 ng\u1eef \u00e2m, ng\u1eef ph\u00e1p, ng\u1eef ngh\u0129a (vietnamese-some questions on phonetics, syntax and semantics). NXB Gi\u00e1o d\u1ee5c, Hanoi.",
"links": null
},
"BIBREF22": {
"ref_id": "b22",
"title": "C\u01a1 s\u1edf ti\u1ebfng vi\u1ec7t",
"authors": [
{
"first": "\u0110\u1ea1t",
"middle": [],
"last": "H\u1eefu",
"suffix": ""
},
{
"first": "T",
"middle": [
"L"
],
"last": "Td Tr\u1ea7n",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "\u0110\u00e0o",
"suffix": ""
}
],
"year": 1998,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "\u0110\u1ea1t H\u1eefu, TD Tr\u1ea7n, and TL \u0110\u00e0o. 1998. C\u01a1 s\u1edf ti\u1ebfng vi\u1ec7t (basis of vietnamese).",
"links": null
},
"BIBREF23": {
"ref_id": "b23",
"title": "Improving text simplification by corpus expansion with unsupervised learning",
"authors": [
{
"first": "Akihiro",
"middle": [],
"last": "Katsuta",
"suffix": ""
},
{
"first": "Kazuhide",
"middle": [],
"last": "Yamamoto",
"suffix": ""
}
],
"year": 2019,
"venue": "2019 International Conference on Asian Language Processing (IALP)",
"volume": "",
"issue": "",
"pages": "216--221",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Akihiro Katsuta and Kazuhide Yamamoto. 2019. Im- proving text simplification by corpus expansion with unsupervised learning. In 2019 International Con- ference on Asian Language Processing (IALP), pages 216-221. IEEE.",
"links": null
},
"BIBREF24": {
"ref_id": "b24",
"title": "A new formula for vietnamese text readability assessment",
"authors": [
{
"first": "An-Vinh",
"middle": [],
"last": "Luong",
"suffix": ""
},
{
"first": "Diep",
"middle": [],
"last": "Nguyen",
"suffix": ""
},
{
"first": "Dien",
"middle": [],
"last": "Dinh",
"suffix": ""
}
],
"year": 2018,
"venue": "2018 10th International Conference on Knowledge and Systems Engineering (KSE)",
"volume": "",
"issue": "",
"pages": "198--202",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "An-Vinh Luong, Diep Nguyen, and Dien Dinh. 2018. A new formula for vietnamese text readability as- sessment. In 2018 10th International Conference on Knowledge and Systems Engineering (KSE), pages 198-202. IEEE.",
"links": null
},
"BIBREF25": {
"ref_id": "b25",
"title": "Building a corpus for vietnamese text readability assessment in the literature domain",
"authors": [
{
"first": "An-Vinh",
"middle": [],
"last": "Luong",
"suffix": ""
},
{
"first": "Diep",
"middle": [],
"last": "Nguyen",
"suffix": ""
},
{
"first": "Dien",
"middle": [],
"last": "Dinh",
"suffix": ""
}
],
"year": 2020,
"venue": "Universal Journal of Educational Research",
"volume": "8",
"issue": "10",
"pages": "4996--5004",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "An-Vinh Luong, Diep Nguyen, and Dien Dinh. 2020. Building a corpus for vietnamese text readability as- sessment in the literature domain. Universal Journal of Educational Research, 8(10):4996-5004.",
"links": null
},
"BIBREF26": {
"ref_id": "b26",
"title": "The Oxford handbook of deaf studies, language, and education",
"authors": [
{
"first": "Marc",
"middle": [],
"last": "Marschark",
"suffix": ""
},
{
"first": "Patricia",
"middle": [
"Elizabeth"
],
"last": "Spencer",
"suffix": ""
}
],
"year": 2010,
"venue": "",
"volume": "2",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Marc Marschark and Patricia Elizabeth Spencer. 2010. The Oxford handbook of deaf studies, language, and education, vol. 2. Oxford University Press.",
"links": null
},
"BIBREF27": {
"ref_id": "b27",
"title": "Sentence simplification with core vocabulary",
"authors": [
{
"first": "Takumi",
"middle": [],
"last": "Maruyama",
"suffix": ""
},
{
"first": "Kazuhide",
"middle": [],
"last": "Yamamoto",
"suffix": ""
}
],
"year": 2017,
"venue": "2017 International Conference on Asian Language Processing (IALP)",
"volume": "",
"issue": "",
"pages": "363--366",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Takumi Maruyama and Kazuhide Yamamoto. 2017. Sentence simplification with core vocabulary. In 2017 International Conference on Asian Language Processing (IALP), pages 363-366. IEEE.",
"links": null
},
"BIBREF28": {
"ref_id": "b28",
"title": "Facilitating reading comprehension through text structure manipulation",
"authors": [
{
"first": "M",
"middle": [],
"last": "Jana",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Mason",
"suffix": ""
}
],
"year": 1978,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jana M Mason. 1978. Facilitating reading comprehen- sion through text structure manipulation. Center for the Study of Reading Technical Report; no. 092.",
"links": null
},
"BIBREF29": {
"ref_id": "b29",
"title": "Entity-focused sentence simplification for relation extraction",
"authors": [
{
"first": "Makoto",
"middle": [],
"last": "Miwa",
"suffix": ""
},
{
"first": "Rune",
"middle": [],
"last": "Saetre",
"suffix": ""
},
{
"first": "Yusuke",
"middle": [],
"last": "Miyao",
"suffix": ""
},
{
"first": "Jun'ichi",
"middle": [],
"last": "Tsujii",
"suffix": ""
}
],
"year": 2010,
"venue": "Proceedings of the 23rd International Conference on Computational Linguistics",
"volume": "",
"issue": "",
"pages": "788--796",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Makoto Miwa, Rune Saetre, Yusuke Miyao, and Jun'ichi Tsujii. 2010. Entity-focused sentence simplification for relation extraction. In Proceedings of the 23rd In- ternational Conference on Computational Linguistics (Coling 2010), pages 788-796.",
"links": null
},
"BIBREF30": {
"ref_id": "b30",
"title": "Vietnamese word segmentation with crfs and svms: An investigation",
"authors": [
{
"first": "Cam-Tu",
"middle": [],
"last": "Nguyen",
"suffix": ""
},
{
"first": "Trung-Kien",
"middle": [],
"last": "Nguyen",
"suffix": ""
},
{
"first": "Xuan-Hieu",
"middle": [],
"last": "Phan",
"suffix": ""
},
{
"first": "Minh",
"middle": [
"Le"
],
"last": "Nguyen",
"suffix": ""
},
{
"first": "Quang Thuy",
"middle": [],
"last": "Ha",
"suffix": ""
}
],
"year": 2006,
"venue": "Proceedings of the 20th Pacific Asia Conference on Language, Information and Computation",
"volume": "",
"issue": "",
"pages": "215--222",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Cam-Tu Nguyen, Trung-Kien Nguyen, Xuan-Hieu Phan, Minh Le Nguyen, and Quang Thuy Ha. 2006a. Viet- namese word segmentation with crfs and svms: An investigation. In Proceedings of the 20th Pacific Asia Conference on Language, Information and Compu- tation, pages 215-222.",
"links": null
},
"BIBREF31": {
"ref_id": "b31",
"title": "A fast and accurate vietnamese word segmenter",
"authors": [
{
"first": "",
"middle": [],
"last": "Dat Quoc Nguyen",
"suffix": ""
},
{
"first": "Thanh",
"middle": [],
"last": "Dai Quoc Nguyen",
"suffix": ""
},
{
"first": "Mark",
"middle": [],
"last": "Vu",
"suffix": ""
},
{
"first": "Mark",
"middle": [],
"last": "Dras",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Johnson",
"suffix": ""
}
],
"year": 2017,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {
"arXiv": [
"arXiv:1709.06307"
]
},
"num": null,
"urls": [],
"raw_text": "Dat Quoc Nguyen, Dai Quoc Nguyen, Thanh Vu, Mark Dras, and Mark Johnson. 2017. A fast and accu- rate vietnamese word segmenter. arXiv preprint arXiv:1709.06307.",
"links": null
},
"BIBREF32": {
"ref_id": "b32",
"title": "Word segmentation for vietnamese text categorization: an online corpus approach",
"authors": [
{
"first": "",
"middle": [],
"last": "Thanh V Nguyen",
"suffix": ""
},
{
"first": "K",
"middle": [],
"last": "Hoang",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Tran",
"suffix": ""
},
{
"first": "T",
"middle": [
"T"
],
"last": "Thanh",
"suffix": ""
},
{
"first": "Hung",
"middle": [],
"last": "Nguyen",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Nguyen",
"suffix": ""
}
],
"year": 2006,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Thanh V Nguyen, Hoang K Tran, Thanh TT Nguyen, and Hung Nguyen. 2006b. Word segmentation for vietnamese text categorization: an online corpus ap- proach. RIVF06.",
"links": null
},
"BIBREF33": {
"ref_id": "b33",
"title": "A lexicon for vietnamese language processing. Language Resources and Evaluation",
"authors": [
{
"first": "Laurent",
"middle": [],
"last": "Th\u1ecb Minh Huy\u1ec1n Nguy\u1ec5n",
"suffix": ""
},
{
"first": "Mathias",
"middle": [],
"last": "Romary",
"suffix": ""
},
{
"first": "Xu\u00e2n L\u01b0\u01a1ng",
"middle": [],
"last": "Rossignol",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "V\u0169",
"suffix": ""
}
],
"year": 2006,
"venue": "",
"volume": "40",
"issue": "",
"pages": "291--309",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Th\u1ecb Minh Huy\u1ec1n Nguy\u1ec5n, Laurent Romary, Mathias Rossignol, and Xu\u00e2n L\u01b0\u01a1ng V\u0169. 2006. A lexicon for vietnamese language processing. Language Re- sources and Evaluation, 40(3):291-309.",
"links": null
},
"BIBREF34": {
"ref_id": "b34",
"title": "Exploring neural text simplification models",
"authors": [
{
"first": "Sergiu",
"middle": [],
"last": "Nisioi",
"suffix": ""
},
{
"first": "Simone",
"middle": [
"Paolo"
],
"last": "Sanja\u0161tajner",
"suffix": ""
},
{
"first": "Liviu P",
"middle": [],
"last": "Ponzetto",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Dinu",
"suffix": ""
}
],
"year": 2017,
"venue": "Proceedings of the 55th annual meeting of the association for computational linguistics",
"volume": "2",
"issue": "",
"pages": "85--91",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Sergiu Nisioi, Sanja\u0160tajner, Simone Paolo Ponzetto, and Liviu P Dinu. 2017. Exploring neural text sim- plification models. In Proceedings of the 55th annual meeting of the association for computational linguis- tics (volume 2: Short papers), pages 85-91.",
"links": null
},
"BIBREF35": {
"ref_id": "b35",
"title": "Semeval 2016 task 11: Complex word identification",
"authors": [
{
"first": "Gustavo",
"middle": [],
"last": "Paetzold",
"suffix": ""
},
{
"first": "Lucia",
"middle": [],
"last": "Specia",
"suffix": ""
}
],
"year": 2016,
"venue": "Proceedings of the 10th International Workshop on Semantic Evaluation (SemEval-2016)",
"volume": "",
"issue": "",
"pages": "560--569",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Gustavo Paetzold and Lucia Specia. 2016. Semeval 2016 task 11: Complex word identification. In Pro- ceedings of the 10th International Workshop on Se- mantic Evaluation (SemEval-2016), pages 560-569.",
"links": null
},
"BIBREF36": {
"ref_id": "b36",
"title": "Lexical Simplification for Non-Native English Speakers",
"authors": [
{
"first": "Gustavo",
"middle": [
"Henrique"
],
"last": "Paetzold",
"suffix": ""
}
],
"year": 2016,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Gustavo Henrique Paetzold. 2016. Lexical Simplifica- tion for Non-Native English Speakers. Ph.D. thesis, University of Sheffield.",
"links": null
},
"BIBREF37": {
"ref_id": "b37",
"title": "Scikit-learn: Machine learning in Python",
"authors": [
{
"first": "F",
"middle": [],
"last": "Pedregosa",
"suffix": ""
},
{
"first": "G",
"middle": [],
"last": "Varoquaux",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Gramfort",
"suffix": ""
},
{
"first": "V",
"middle": [],
"last": "Michel",
"suffix": ""
},
{
"first": "B",
"middle": [],
"last": "Thirion",
"suffix": ""
},
{
"first": "O",
"middle": [],
"last": "Grisel",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Blondel",
"suffix": ""
},
{
"first": "P",
"middle": [],
"last": "Prettenhofer",
"suffix": ""
},
{
"first": "R",
"middle": [],
"last": "Weiss",
"suffix": ""
},
{
"first": "V",
"middle": [],
"last": "Dubourg",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Vanderplas",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Passos",
"suffix": ""
},
{
"first": "D",
"middle": [],
"last": "Cournapeau",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Brucher",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Perrot",
"suffix": ""
},
{
"first": "E",
"middle": [],
"last": "Duchesnay",
"suffix": ""
}
],
"year": 2011,
"venue": "Journal of Machine Learning Research",
"volume": "12",
"issue": "",
"pages": "2825--2830",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "F. Pedregosa, G. Varoquaux, A. Gramfort, V. Michel, B. Thirion, O. Grisel, M. Blondel, P. Prettenhofer, R. Weiss, V. Dubourg, J. Vanderplas, A. Passos, D. Cournapeau, M. Brucher, M. Perrot, and E. Duch- esnay. 2011. Scikit-learn: Machine learning in Python. Journal of Machine Learning Research, 12:2825-2830.",
"links": null
},
"BIBREF38": {
"ref_id": "b38",
"title": "A hybrid approach to word segmentation of vietnamese texts",
"authors": [
{
"first": "H\u00f4ng",
"middle": [],
"last": "Phuong",
"suffix": ""
},
{
"first": "Nguy\u00ean Thi Minh",
"middle": [],
"last": "Huy\u00ean",
"suffix": ""
},
{
"first": "Azim",
"middle": [],
"last": "Roussanaly",
"suffix": ""
},
{
"first": "H\u00f4",
"middle": [
"Tu\u00f2ng"
],
"last": "Vinh",
"suffix": ""
}
],
"year": 2008,
"venue": "International conference on language and automata theory and applications",
"volume": "",
"issue": "",
"pages": "240--249",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "H\u00f4ng Phuong, Nguy\u00ean Thi Minh Huy\u00ean, Azim Rous- sanaly, H\u00f4 Tu\u00f2ng Vinh, et al. 2008. A hybrid ap- proach to word segmentation of vietnamese texts. In International conference on language and automata theory and applications, pages 240-249. Springer.",
"links": null
},
"BIBREF39": {
"ref_id": "b39",
"title": "Frequent words improve readability and short words improve understandability for people with dyslexia",
"authors": [
{
"first": "Luz",
"middle": [],
"last": "Rello",
"suffix": ""
},
{
"first": "Ricardo",
"middle": [],
"last": "Baeza-Yates",
"suffix": ""
},
{
"first": "Laura",
"middle": [],
"last": "Dempere-Marco",
"suffix": ""
},
{
"first": "Horacio",
"middle": [],
"last": "Saggion",
"suffix": ""
}
],
"year": 2013,
"venue": "IFIP Conference on Human-Computer Interaction",
"volume": "",
"issue": "",
"pages": "203--219",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Luz Rello, Ricardo Baeza-Yates, Laura Dempere- Marco, and Horacio Saggion. 2013. Frequent words improve readability and short words improve under- standability for people with dyslexia. In IFIP Con- ference on Human-Computer Interaction, pages 203- 219. Springer.",
"links": null
},
"BIBREF40": {
"ref_id": "b40",
"title": "Making it simplext: Implementation and evaluation of a text simplification system for spanish",
"authors": [
{
"first": "Horacio",
"middle": [],
"last": "Saggion",
"suffix": ""
},
{
"first": "Sanja",
"middle": [],
"last": "\u0160tajner",
"suffix": ""
},
{
"first": "Stefan",
"middle": [],
"last": "Bott",
"suffix": ""
},
{
"first": "Simon",
"middle": [],
"last": "Mille",
"suffix": ""
},
{
"first": "Luz",
"middle": [],
"last": "Rello",
"suffix": ""
},
{
"first": "Biljana",
"middle": [],
"last": "Drndarevic",
"suffix": ""
}
],
"year": 2015,
"venue": "ACM Transactions on Accessible Computing (TACCESS)",
"volume": "6",
"issue": "4",
"pages": "1--36",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Horacio Saggion, Sanja\u0160tajner, Stefan Bott, Simon Mille, Luz Rello, and Biljana Drndarevic. 2015. Making it simplext: Implementation and evaluation of a text simplification system for spanish. ACM Transactions on Accessible Computing (TACCESS), 6(4):1-36.",
"links": null
},
"BIBREF41": {
"ref_id": "b41",
"title": "A comparison of techniques to automatically identify complex words",
"authors": [
{
"first": "Matthew",
"middle": [],
"last": "Shardlow",
"suffix": ""
}
],
"year": 2013,
"venue": "51st Annual Meeting of the Association for Computational Linguistics Proceedings of the Student Research Workshop",
"volume": "",
"issue": "",
"pages": "103--109",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Matthew Shardlow. 2013. A comparison of techniques to automatically identify complex words. In 51st Annual Meeting of the Association for Computa- tional Linguistics Proceedings of the Student Re- search Workshop, pages 103-109.",
"links": null
},
"BIBREF42": {
"ref_id": "b42",
"title": "A survey of automated text simplification",
"authors": [
{
"first": "Matthew",
"middle": [],
"last": "Shardlow",
"suffix": ""
}
],
"year": 2014,
"venue": "International Journal of Advanced Computer Science and Applications",
"volume": "4",
"issue": "1",
"pages": "58--70",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Matthew Shardlow. 2014. A survey of automated text simplification. International Journal of Advanced Computer Science and Applications, 4(1):58-70.",
"links": null
},
"BIBREF43": {
"ref_id": "b43",
"title": "Syntactic simplification and text cohesion",
"authors": [
{
"first": "Advaith",
"middle": [],
"last": "Siddharthan",
"suffix": ""
}
],
"year": 2006,
"venue": "Research on Language and Computation",
"volume": "4",
"issue": "1",
"pages": "77--109",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Advaith Siddharthan. 2006. Syntactic simplification and text cohesion. Research on Language and Com- putation, 4(1):77-109.",
"links": null
},
"BIBREF44": {
"ref_id": "b44",
"title": "Text simplification using typed dependencies: A comparision of the robustness of different generation strategies",
"authors": [
{
"first": "Advaith",
"middle": [],
"last": "Siddharthan",
"suffix": ""
}
],
"year": 2011,
"venue": "Proceedings of the 13th European Workshop on Natural Language Generation",
"volume": "",
"issue": "",
"pages": "2--11",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Advaith Siddharthan. 2011. Text simplification using typed dependencies: A comparision of the robustness of different generation strategies. In Proceedings of the 13th European Workshop on Natural Language Generation, pages 2-11.",
"links": null
},
"BIBREF45": {
"ref_id": "b45",
"title": "Hybrid text simplification using synchronous dependency grammars with hand-written and automatically harvested rules",
"authors": [
{
"first": "Advaith",
"middle": [],
"last": "Siddharthan",
"suffix": ""
},
{
"first": "Angrosh",
"middle": [],
"last": "Mandya",
"suffix": ""
}
],
"year": 2014,
"venue": "Proceedings of the 14th Conference of the European Chapter of the Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "722--731",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Advaith Siddharthan and Angrosh Mandya. 2014. Hy- brid text simplification using synchronous depen- dency grammars with hand-written and automatically harvested rules. In Proceedings of the 14th Confer- ence of the European Chapter of the Association for Computational Linguistics, pages 722-731.",
"links": null
},
"BIBREF46": {
"ref_id": "b46",
"title": "Vims: a high-quality vietnamese dataset for abstractive multi-document summarization",
"authors": [
{
"first": "Nhi-Thao",
"middle": [],
"last": "Tran",
"suffix": ""
},
{
"first": "Minh-Quoc",
"middle": [],
"last": "Nghiem",
"suffix": ""
},
{
"first": "Nhung",
"middle": [
"T",
"H"
],
"last": "Nguyen",
"suffix": ""
},
{
"first": "Ngan",
"middle": [],
"last": "Luu-Thuy Nguyen",
"suffix": ""
},
{
"first": "Nam",
"middle": [],
"last": "Van Chi",
"suffix": ""
},
{
"first": "Dien",
"middle": [],
"last": "Dinh",
"suffix": ""
}
],
"year": 2020,
"venue": "Language Resources and Evaluation",
"volume": "54",
"issue": "4",
"pages": "893--920",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Nhi-Thao Tran, Minh-Quoc Nghiem, Nhung TH Nguyen, Ngan Luu-Thuy Nguyen, Nam Van Chi, and Dien Dinh. 2020. Vims: a high-quality viet- namese dataset for abstractive multi-document sum- marization. Language Resources and Evaluation, 54(4):893-920.",
"links": null
},
"BIBREF47": {
"ref_id": "b47",
"title": "Languages of the Himalayas: an ethnolinguistic handbook of the greater Himalayan region",
"authors": [
{
"first": "George",
"middle": [],
"last": "Van Driem",
"suffix": ""
}
],
"year": 2001,
"venue": "",
"volume": "2",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "George Van Driem. 2001. Languages of the Hi- malayas: an ethnolinguistic handbook of the greater Himalayan region, volume 2. Brill.",
"links": null
},
"BIBREF48": {
"ref_id": "b48",
"title": "Vncorenlp: A vietnamese natural language processing toolkit",
"authors": [
{
"first": "Thanh",
"middle": [],
"last": "Vu",
"suffix": ""
},
{
"first": "Dat Quoc",
"middle": [],
"last": "Nguyen",
"suffix": ""
},
{
"first": "Dai Quoc",
"middle": [],
"last": "Nguyen",
"suffix": ""
},
{
"first": "Mark",
"middle": [],
"last": "Dras",
"suffix": ""
},
{
"first": "Mark",
"middle": [],
"last": "Johnson",
"suffix": ""
}
],
"year": 2018,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {
"arXiv": [
"arXiv:1801.01331"
]
},
"num": null,
"urls": [],
"raw_text": "Thanh Vu, Dat Quoc Nguyen, Dai Quoc Nguyen, Mark Dras, and Mark Johnson. 2018. Vncorenlp: A viet- namese natural language processing toolkit. arXiv preprint arXiv:1801.01331.",
"links": null
},
"BIBREF49": {
"ref_id": "b49",
"title": "Learning to simplify sentences with quasi-synchronous grammar and integer programming",
"authors": [
{
"first": "Kristian",
"middle": [],
"last": "Woodsend",
"suffix": ""
},
{
"first": "Mirella",
"middle": [],
"last": "Lapata",
"suffix": ""
}
],
"year": 2011,
"venue": "Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing",
"volume": "",
"issue": "",
"pages": "409--420",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Kristian Woodsend and Mirella Lapata. 2011. Learning to simplify sentences with quasi-synchronous gram- mar and integer programming. In Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing, pages 409-420.",
"links": null
},
"BIBREF50": {
"ref_id": "b50",
"title": "Optimizing statistical machine translation for text simplification",
"authors": [
{
"first": "Wei",
"middle": [],
"last": "Xu",
"suffix": ""
},
{
"first": "Courtney",
"middle": [],
"last": "Napoles",
"suffix": ""
},
{
"first": "Ellie",
"middle": [],
"last": "Pavlick",
"suffix": ""
},
{
"first": "Quanze",
"middle": [],
"last": "Chen",
"suffix": ""
},
{
"first": "Chris",
"middle": [],
"last": "Callison-Burch",
"suffix": ""
}
],
"year": 2016,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Wei Xu, Courtney Napoles, Ellie Pavlick, Quanze Chen, and Chris Callison-Burch. 2016. Optimizing sta- tistical machine translation for text simplification.",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"num": null,
"type_str": "figure",
"text": "The frequency distribution of the three full (unsplit) datasets.",
"uris": null
},
"FIGREF1": {
"num": null,
"type_str": "figure",
"text": "The accuracy distributions across possible cutoff frequencies of the three testing datasets.",
"uris": null
},
"TABREF1": {
"html": null,
"type_str": "table",
"num": null,
"text": "Preliminary quantitative information of the three corpora. [REA = READABILITY, CLU = CLUSTER, CLA = CLASSIFICATION] * involves manual processing to remove foreign words and invalid words",
"content": "<table/>"
},
"TABREF3": {
"html": null,
"type_str": "table",
"num": null,
"text": "The accuracy, precision, recall, and F1 scores of the Frequency Threshold approach across the three",
"content": "<table><tr><td colspan=\"3\">testing datasets. [REA = READABILITY, CLU = CLUSTER, CLA = CLASSIFICATION]</td></tr><tr><td/><td colspan=\"2\">cutoff frequency cutoff percentile</td></tr><tr><td>REA</td><td>154</td><td>91.5%</td></tr><tr><td>CLU</td><td>21</td><td>79.6%</td></tr><tr><td>CLA</td><td>168</td><td>92.6%</td></tr></table>"
},
"TABREF4": {
"html": null,
"type_str": "table",
"num": null,
"text": "",
"content": "<table><tr><td>: The cutoff frequency and the cutoff percentile of the three testing datasets. [REA = READABILITY, CLU = CLUSTER, CLA = CLASSIFICATION]</td></tr></table>"
},
"TABREF6": {
"html": null,
"type_str": "table",
"num": null,
"text": "The accuracy, precision, recall, and F1 scores of the SVM classifier of the three testing datasets.",
"content": "<table><tr><td colspan=\"5\">[REA = READABILITY, CLU = CLUSTER, CLA = CLASSIFICATION]</td></tr><tr><td/><td colspan=\"3\">accuracy precision recall</td><td>F1</td></tr><tr><td>All</td><td>0.437</td><td>0.727</td><td colspan=\"2\">0.459 0.563</td></tr><tr><td>M</td><td>0.824</td><td>1.0</td><td colspan=\"2\">0.739 0.850</td></tr></table>"
},
"TABREF7": {
"html": null,
"type_str": "table",
"num": null,
"text": "",
"content": "<table><tr><td>: The accuracy, precision, recall, and F1 scores of the human annotation process. [M = Majority]</td></tr></table>"
}
}
}
}