| { |
| "paper_id": "R15-1014", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T14:56:51.732751Z" |
| }, |
| "title": "Readability Assessment of Translated Texts", |
| "authors": [ |
| { |
| "first": "Alina", |
| "middle": [ |
| "Maria" |
| ], |
| "last": "Ciobanu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Bucharest", |
| "location": {} |
| }, |
| "email": "alina.ciobanu@my.fmi.unibuc.ro" |
| }, |
| { |
| "first": "Liviu", |
| "middle": [ |
| "P" |
| ], |
| "last": "Dinu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Bucharest", |
| "location": {} |
| }, |
| "email": "liviu.p.dinu@gmail.com" |
| }, |
| { |
| "first": "Flaviu", |
| "middle": [ |
| "Ioan" |
| ], |
| "last": "Pepelea", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "flaviupepelea@gmail.com" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "In this paper we investigate how readability varies between texts originally written in English and texts translated into English. For quantification, we analyze several factors that are relevant in assessing readability-shallow, lexical and morpho-syntactic features-and we employ the widely used Flesch-Kincaid formula to measure the variation of the readability level between original English texts and texts translated into English. Finally, we analyze whether the readability features have enough discriminative power to distinguish between originals and translations.", |
| "pdf_parse": { |
| "paper_id": "R15-1014", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "In this paper we investigate how readability varies between texts originally written in English and texts translated into English. For quantification, we analyze several factors that are relevant in assessing readability-shallow, lexical and morpho-syntactic features-and we employ the widely used Flesch-Kincaid formula to measure the variation of the readability level between original English texts and texts translated into English. Finally, we analyze whether the readability features have enough discriminative power to distinguish between originals and translations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The products of translation generally differ from original, non-translated texts. According to Koppel and Ordan (2011) , two main aspects that lead to differences between the two categories have been identified: 1) effects of the translation process that are independent of the source language; 2) effects of the source language on the translation product, also known as source language interference. According to Sun (2012) , the reception of a translated text is related to cross-cultural readability. Translators need to understand the particularities of both the source and the target language in order to transfer the meaning of the text from one language to another. While rendering the source language text into the target language, it is also important to maintain the style of the document. Various genres of text might be translated for different purposes, which influence the choice of the translation strategy. For example, for political speeches the purpose is to report exactly what is communicated in a given text (Trosborg, 1997) . In this paper we investigate how readability features differ between original and translated texts.", |
| "cite_spans": [ |
| { |
| "start": 95, |
| "end": 118, |
| "text": "Koppel and Ordan (2011)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 414, |
| "end": 424, |
| "text": "Sun (2012)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 1029, |
| "end": 1045, |
| "text": "(Trosborg, 1997)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction and Related Work", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Systems for automatic readability assessment have received an increasing attention during the last decade. While research focused initially on English, further studies have shown a growing interest in other languages, such as Spanish (Huerta, 1959) , French (Kandel and Moles, 1958) or Italian (Franchina and Vacca, 1986; Fran\u00e7ois and Miltsakaki, 2012) . Readability assessment systems have a wide variety of applications. We mention here only a few: 1) they provide assistance in selecting reading material with an appropriate level of complexity from a large collection of documents, for second language learners and people with disabilities or low literacy skills (Collins-Thompson, 2011) ; 2) they help adapting the technical documents to various levels of medical expertise, within the medical domain (Elhadad and Sutaria, 2007) ; 3) they assist the processes of machine translation, text simplification, or speech recognition and evaluate their effectiveness, in the research area of NLP (Aluisio et al., 2010; Stymne et al., 2013) .", |
| "cite_spans": [ |
| { |
| "start": 234, |
| "end": 248, |
| "text": "(Huerta, 1959)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 258, |
| "end": 282, |
| "text": "(Kandel and Moles, 1958)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 294, |
| "end": 321, |
| "text": "(Franchina and Vacca, 1986;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 322, |
| "end": 352, |
| "text": "Fran\u00e7ois and Miltsakaki, 2012)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 667, |
| "end": 691, |
| "text": "(Collins-Thompson, 2011)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 806, |
| "end": 833, |
| "text": "(Elhadad and Sutaria, 2007)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 994, |
| "end": 1016, |
| "text": "(Aluisio et al., 2010;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 1017, |
| "end": 1037, |
| "text": "Stymne et al., 2013)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction and Related Work", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Most of the traditional readability approaches investigate shallow text properties to determine the complexity of a text, based on assumptions which correlate surface features with the linguistic factors which influence readability. For example, the average number of characters or syllables per word, the average number of words per sentence and the percentage of words not occurring among the most frequent n words in a language are correlated with the lexical, syntactic and, respectively, the semantic complexity of the text. The Flesch-Kincaid measure (Kincaid et al., 1975) employs the average number of syllables per word and the average number of words per sentence to assess readability, while the Automated Readability Index (Smith and Senter, 1967) and the Coleman-Liau metric (Coleman and Liau, 1975 ) measure word length based on character count rather than syllable count; they are func-tions of both the average number of characters per word and the average number of words per sentence. Gunning Fog (Gunning, 1952) and SMOG (McLaughlin, 1969) account also for the percentage of polysyllabic words and the Dale-Chall formula (Dale and Chall, 1995) relies on lists of most frequent words to assess readability.", |
| "cite_spans": [ |
| { |
| "start": 557, |
| "end": 579, |
| "text": "(Kincaid et al., 1975)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 735, |
| "end": 759, |
| "text": "(Smith and Senter, 1967)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 788, |
| "end": 811, |
| "text": "(Coleman and Liau, 1975", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 1015, |
| "end": 1030, |
| "text": "(Gunning, 1952)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 1035, |
| "end": 1058, |
| "text": "SMOG (McLaughlin, 1969)", |
| "ref_id": null |
| }, |
| { |
| "start": 1140, |
| "end": 1162, |
| "text": "(Dale and Chall, 1995)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction and Related Work", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The problem that we investigate in this paper is how the readability level varies across original and translated texts (from various source languages). We identify utterances from Europarl in a wide variety of languages, we identify their translations into English, and on these English translations we conduct a quantitative analysis of the readability features. As most research on readability focused on English so far, there are several formulas, features and tools available for quantifying the differences in the level of readability.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Our Approach", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In this paper we complement our previous analysis (Ciobanu and Dinu, 2014) on the readability features for the original texts and their translations. Here we focus on the target language, analyzing whether different source languages lead to differences in the readability level for the translated texts.", |
| "cite_spans": [ |
| { |
| "start": 50, |
| "end": 74, |
| "text": "(Ciobanu and Dinu, 2014)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Our Approach", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We run our experiments on Europarl (Koehn, 2005) , a multilingual parallel corpus extracted from the proceedings of the European Parliament. Its main intended use is as aid for statistical machine translation research (Tiedemann, 2012) . The corpus is tokenized and aligned in 21 languages. In Table 1 we report statistics extracted from our dataset. Given the fact that the Flesch-Kincaid formula is based on the average number of words per sentence and on the average number of syllables per word, the differences between the languages (in terms of the number of speakers and sentences) do not affect the results.", |
| "cite_spans": [ |
| { |
| "start": 35, |
| "end": 48, |
| "text": "(Koehn, 2005)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 218, |
| "end": 235, |
| "text": "(Tiedemann, 2012)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 294, |
| "end": 301, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "According to van Halteren (2008), translations in the European Parliament are generally made by native speakers of the target language. Translation is an inherent part of the political activity (Sch\u00e4ffner and Bassnett, 2010) and has a high influence on the way the political speeches are perceived. The question posed by Sch\u00e4ffner and Bassnett (2010) linguistic, cultural and ideological boundaries?\" summarizes the complexity of the process of translating political documents. Political texts might contain complex technical terms and elaborated sentences. Therefore, the results of our experiments are probably domain-specific and cannot be generalized to other types of text. Although parliamentary documents probably have a low readability level, our investigation is not negatively influenced by the choice of corpus because we are consistent across all experiments in terms of text gender and we report results obtained solely by comparison between source and target languages.", |
| "cite_spans": [ |
| { |
| "start": 194, |
| "end": 224, |
| "text": "(Sch\u00e4ffner and Bassnett, 2010)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 321, |
| "end": 350, |
| "text": "Sch\u00e4ffner and Bassnett (2010)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "To obtain the dataset for our experiments, we follow the pre-processing steps described by Ciobanu and Dinu (2014) . We extract segments of text written in English, we identify their source languages, and we group them based on the language of the speaker. We compute the Flesch-Kicaid formula for each collection of segments of text T i having the source language L i and the target language English. The files contain annotations for marking the document (<chapter>), the speaker (<speaker>) and the paragraph (<p>). Some documents have the attribute language for the speaker tag, which indicates the language used by the original speaker. Another way of annotating the original language is by having the language abbreviation written between parentheses at the beginning of each segment of text. However, there are segments where the language is not marked in either of the two ways. We account only for sentences for which the original language could be determined. We handle inconsistent encodings and values generated by the automatic extraction of the information from the website of the European Parliament, such as the occurrence of more than one speaker names in the <speaker> tag, separated either by a comma or by the and conjunction, or the occurrence of a speaker's affiliation in the <speaker> tag, e.g., Ana Maria Gomes (PSE). We discard the transcribers' descriptions of the parliamentary sessions (such as \"Applause\" or \"The President interrupted the speaker\").", |
| "cite_spans": [ |
| { |
| "start": 91, |
| "end": 114, |
| "text": "Ciobanu and Dinu (2014)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pre-processing", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "In this section we describe our experiments on the variability of the readability feature values for original English texts and texts translated into English from various source languages.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We employ the Flesch-Kincaid measure (Kincaid et al., 1975) , which assesses readability based on the average number of syllables per word and the average number of words per sentence. The Flesch-Kincaid formula is one of the most widely used readability metrics developed for English. It assesses the level of readability accounting for the number of syllables per word (as an approximation of the difficulty of a word) and for the number of words per sentence (as estimation of the syntactic difficulty of a text). The metric is computed as follows:", |
| "cite_spans": [ |
| { |
| "start": 37, |
| "end": 59, |
| "text": "(Kincaid et al., 1975)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Flesch-Kincaid", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "0.39 #words #sentences + 11.8 #syllables #words \u2212 15.59.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Flesch-Kincaid", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The Flesch-Kincaid formula produces values which correspond with U.S. grade levels. We ap-ply this measure on English texts, either originally written in English or translated from other languages. To determine the number of syllable for English words, we employ CMU Pronouncing Dictionary 1 , a machine-readable dictionary that contains over 125,000 words and provides information regarding their syllabication. In order to investigate and compare the readability level for original English texts and texts translated from other languages, we complete the following experiments. In a first phase, we compute the Flesh-Kincaid metric for each language, for all the concatenated files in our Europarl subcorpus.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Flesch-Kincaid", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The readability of a text depends, among other things, on its author. We investigate whether the readability level characterizes certain speakers, if it varies across different utterances of the same speaker and if the readability level for a language is influenced by speakers having odd readability levels associated. For this purpose, we designed three experiments based on the same idea -identification of outliers in our dataset. Further, in order to eliminate a confounding factor, namely the individuality of the speakers, to focus on the source language of the text, we perform three stages of pruning for our dataset.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Outliers Removal", |
| "sec_num": "3.1.1" |
| }, |
| { |
| "text": "\u2022 S1: For each language, we account for the overall readability score computed for all documents of each speaker; based on these computed values, we determine outliers and remove them from the dataset; then, we rerun the experiments based on Flesch-Kincaid measure for the remaining speakers. In order to achieve this, we divide the dataset based on the source language of the segments of text and for each language we divide the segments of text based on the speaker. We compute the overall readability score for the utterances of each speaker and, after dividing the segments of text from the dataset based on the speakers, we compute the standard quartiles Q1, Q2 and Q3 with regard to the overall level of readability for each speaker. We use the interquartile range IQR = Q3 \u2212 Q1 to find outliers in data. For our experiments, we consider outliers the observations that fall below Q1 \u2212 1.5(IQR) (lower fence -LF ) or above Q3 + 1.5(IQR) (upper fence -U F ) (Sheskin, 2003) . We compute the Flesch-Kincaid formula again accounting only for the speakers having the individual level of readability in [LF, U F ] range.", |
| "cite_spans": [ |
| { |
| "start": 962, |
| "end": 977, |
| "text": "(Sheskin, 2003)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Outliers Removal", |
| "sec_num": "3.1.1" |
| }, |
| { |
| "text": "\u2022 S2: We repeat the previous experiment introducing a further level of granularity: we investigate outliers for each speaker by computing the Flesch-Kincaid metric individually for each document belonging to a speaker. We discard documents whose levels of readability are outliers and we compute the Flesch-Kincaid formula again accounting only for the documents having the individual level of readability in the [LF, U F ] range.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Outliers Removal", |
| "sec_num": "3.1.1" |
| }, |
| { |
| "text": "\u2022 S3: In the last experiment we consider, for each language, the readability scores of each document belonging to each speaker. We apply the same strategy as before: we detect outliers among documents and remove them from the dataset. Then we compute the Flesch-Kincaid measure again, for the concatenation of all the remaining documents after outliers removal, for each language.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Outliers Removal", |
| "sec_num": "3.1.1" |
| }, |
| { |
| "text": "In Table 2 , column 2, we report the Flesch-Kincaid values for all 21 languages. One can notice that the lowest Flesh-Kincaid value belongs to the collection of texts having English as the source language, followed by texts having Germanic source languages, texts having Slavic source languages and, finally, texts translated from Romance languages. Finno-Ugric languages represent the only family that doesn't form a cluster with regard to the Flesch-Kincaid metric value. Among the Romance languages, French is the only one that sets apart from the group, being closer to the Germanic cluster. For the outliers removal experiment we report the results in Table 2 , columns 3-5. The results are very similar to those of the initial experiment, suggesting that although there are outliers in the data (in Figure 1 we represent the boxplot for the Flesch-Kincaid values for each speaker's utterances), their presence does not impact significantly the overall readability values.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 3, |
| "end": 10, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 657, |
| "end": 664, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 805, |
| "end": 813, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "In this section we investigate the readability of translation as a classification problem. Taking as input original English sentences and sentences", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Classification", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Lang.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Classification", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Flesch-Kincaid before remo-after pruning ving outliers S1 S2 S3 EN 11.45 11.50 11.47 11.51 SV 11.50 11.49 11.45 11.44 NL 11.56 11.55 11.51 11.50 DA 11.95 11.94 11.90 11.89 FI 11.99 12.01 11.95 Table 2 : Flesch-Kincaid values for our Europarl subset before (column 2) and after (columns 3-5) removing outliers.", |
| "cite_spans": [ |
| { |
| "start": 61, |
| "end": 192, |
| "text": "S3 EN 11.45 11.50 11.47 11.51 SV 11.50 11.49 11.45 11.44 NL 11.56 11.55 11.51 11.50 DA 11.95 11.94 11.90 11.89 FI 11.99 12.01 11.95", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 193, |
| "end": 200, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Classification", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "translated from other languages, our goal is to see whether the readability features have enough discriminative power to distinguish original from translated text. Thus, we train a logistic regression classifier 2 for a binary decision problem: original versus translation. We extract randomly from our dataset 1,000 English original sentences and 1,000 sentences translated into English 3 . We split this dataset into train and test subsets with a 3:1 ratio. We choose the optimal value for the logistic regression regularization parameter performing 3-fold cross-validation on the training set (we search over {10 \u22123 , ..., 10 3 }). Finally, we evaluate the model on the test set. Language 0 5 10 15 20 25 30 Flesch-Kincaid Figure 1 : Boxplot for the Flesch-Kincaid values for each speaker's utterances, grouped by the language of the speaker.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 683, |
| "end": 724, |
| "text": "Language 0 5 10 15 20 25 30", |
| "ref_id": "TABREF1" |
| }, |
| { |
| "start": 740, |
| "end": 748, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Classification", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We use several shallow, lexical and morpho-syntactic features that were traditionally used for assessing readability and have proven high discriminative power within readability metrics:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "3.2.1" |
| }, |
| { |
| "text": "\u2022 Shallow Features -Average number of words per sentence. The average sentence length is one of the most widely used metrics for determining readability level and was employed in numerous readability formulas, proving to be most meaningful in combined evidence with average word frequency. Feng et al. (2010) find the average sentence length to have higher predictive power than the other lexical and syllable-based features they used. -Average number of characters (or syllables) per word. It is generally considered that frequently occurring words are usually short, so the average number of characters per word was broadly used for measuring readability in a robust manner. Many readability formulas measure word length in syllables rather than letters.", |
| "cite_spans": [ |
| { |
| "start": 290, |
| "end": 308, |
| "text": "Feng et al. (2010)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "3.2.1" |
| }, |
| { |
| "text": "\u2022 Lexical Features -Type/Token Ratio. The proportion between the number of lexical types and the number of tokens indicates the range of use of vocabulary. The higher the value of this feature, the higher the variability of the vocabulary used in the text.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "3.2.1" |
| }, |
| { |
| "text": "\u2022 Morpho-Syntactic Features -Relative frequency of POS unigrams.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "3.2.1" |
| }, |
| { |
| "text": "The ratio for 5 POS (verbs, nouns, pronouns, adjectives and adverbs), computed individually on a per-token basis 4 . -Lexical density. The proportion of content words (verbs, nouns, adjectives and adverbs), computed on a per-token basis. Grammatical features were shown to be useful in readability prediction (Heilman et al., 2007) .", |
| "cite_spans": [ |
| { |
| "start": 309, |
| "end": 331, |
| "text": "(Heilman et al., 2007)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "3.2.1" |
| }, |
| { |
| "text": "The optimal value for the logistic regression regularization parameter is found to be 1. We obtain 0.59 F-score on the test set, on average, in deciding whether a sentence was translated into English or is an original English sentence. In Table 3 we report the precision, recall and F-score for the prediction task. We also report 95% confidence intervals (CI) measured on 1,000 iterations of bootstrap resampling with replacement (Koehn, 2004) . The most informative features are morphological features, more specifically the POS ratios, as shown in Table 4 . These results are significantly lower than state-of-the-art performance ", |
| "cite_spans": [ |
| { |
| "start": 431, |
| "end": 444, |
| "text": "(Koehn, 2004)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 239, |
| "end": 246, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| }, |
| { |
| "start": 551, |
| "end": 558, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "3.2.2" |
| }, |
| { |
| "text": "In this paper we investigate the impact of translation on readability as a two-fold problem. Firstly, we investigate how the Flesch-Kincaid values vary for original English texts and for translations form different languages into English. We notice that the values form clusters for the investigated language families. Secondly, we use a set of shallow, lexical and morpho-syntactic readability features to investigate whether readability features have enough discriminative power to distinguish original English texts from translations. We obtain 0.59 F-score, on average, using only readability features, and an improvement to 0.75 when we add n-grams of tokens and POS tags as features. Our results show that, although the readability level of translated texts is similar for texts having the source language in the same language families, readability features do not have enough discriminative power to obtain high performance on distinguishing original texts from translations. However, using only readability features the prediction F-score is significantly better than chance (p < 0.05).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "4" |
| }, |
| { |
| "text": "In our future work, we intend to enrich the variety of the texts, beginning with an analysis of translations of literary works. As far as resources are available, we plan to investigate other readability metrics as well. We believe our method can 5 Repeating the classification experiment for each source language (that is, considering translations from each source language Li, except for English, one at a time) shows that the differences in performance are not statistically significant (p < 0.05). Thus, we conclude that readability features cannot discriminate between original texts and translations significantly better for some of the source languages than for the others. Table 4 : Logistic regression coefficients for readability features (the higher the absolute value of the coefficient, the more informative the feature).", |
| "cite_spans": [ |
| { |
| "start": 247, |
| "end": 248, |
| "text": "5", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 681, |
| "end": 688, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "4" |
| }, |
| { |
| "text": "provide useful information regarding the difficulty of translation from one language into another in terms of readability.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Coefficient", |
| "sec_num": null |
| }, |
| { |
| "text": "http://www.speech.cs.cmu.edu/cgi-bin/cmudict", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We use the scikit-learn library(Pedregosa et al., 2011).3 We work with only 1,000 sentences in order to have a stratified dataset, since for English the number of sentences we identified is 1,262. The subset of translated sentences is also stratified: 50 from each of the 20 languages that we investigate, besides English.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "For tokenization, lemmatization and part of speech tagging we use the Stanford CoreNLP Natural Language Processing Toolkit(Manning et al., 2014).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We thank the anonymous reviewers for their helpful and constructive comments. The contribution of the authors to this paper is equal. Liviu P. Dinu was supported by UEFISCDI, PNII-ID-PCE-2011-3-0959.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Readability Assessment for Text Simplification", |
| "authors": [ |
| { |
| "first": "Sandra", |
| "middle": [], |
| "last": "Aluisio", |
| "suffix": "" |
| }, |
| { |
| "first": "Lucia", |
| "middle": [], |
| "last": "Specia", |
| "suffix": "" |
| }, |
| { |
| "first": "Caroline", |
| "middle": [], |
| "last": "Gasperin", |
| "suffix": "" |
| }, |
| { |
| "first": "Carolina", |
| "middle": [], |
| "last": "Scarton", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the NAACL HLT 2010 Fifth Workshop on Innovative Use of NLP for Building Educational Applications, IUNLPBEA 2010", |
| "volume": "", |
| "issue": "", |
| "pages": "1--9", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sandra Aluisio, Lucia Specia, Caroline Gasperin, and Carolina Scarton. 2010. Readability Assessment for Text Simplification. In Proceedings of the NAACL HLT 2010 Fifth Workshop on Innovative Use of NLP for Building Educational Applications, IUNLPBEA 2010, pages 1-9.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "A Quantitative Insight into the Impact of Translation on Readability", |
| "authors": [ |
| { |
| "first": "Alina", |
| "middle": [ |
| "Maria" |
| ], |
| "last": "Ciobanu", |
| "suffix": "" |
| }, |
| { |
| "first": "Liviu", |
| "middle": [], |
| "last": "Dinu", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 3rd Workshop on Predicting and Improving Text Readability for Target Reader Populations", |
| "volume": "", |
| "issue": "", |
| "pages": "104--113", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alina Maria Ciobanu and Liviu Dinu. 2014. A Quanti- tative Insight into the Impact of Translation on Read- ability. In Proceedings of the 3rd Workshop on Pre- dicting and Improving Text Readability for Target Reader Populations, PITR 2014, pages 104-113.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "A Computer Readability Formula Designed for Machine Scoring", |
| "authors": [ |
| { |
| "first": "Meri", |
| "middle": [], |
| "last": "Coleman", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [ |
| "L" |
| ], |
| "last": "Liau", |
| "suffix": "" |
| } |
| ], |
| "year": 1975, |
| "venue": "Journal of Applied Psychology", |
| "volume": "60", |
| "issue": "2", |
| "pages": "283--284", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Meri Coleman and T. L. Liau. 1975. A Computer Readability Formula Designed for Machine Scoring. Journal of Applied Psychology, 60(2):283-284.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Enriching Information Retrieval with Reading Level Prediction", |
| "authors": [ |
| { |
| "first": "Kevyn", |
| "middle": [], |
| "last": "Collins-Thompson", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "SIGIR 2011 Workshop on Enriching Information Retrieval", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kevyn Collins-Thompson. 2011. Enriching Informa- tion Retrieval with Reading Level Prediction. In SI- GIR 2011 Workshop on Enriching Information Re- trieval.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Readability Revisited: The New Dale-Chall Readability Formula", |
| "authors": [ |
| { |
| "first": "Edgar", |
| "middle": [], |
| "last": "Dale", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeanne", |
| "middle": [], |
| "last": "Chall", |
| "suffix": "" |
| } |
| ], |
| "year": 1995, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Edgar Dale and Jeanne Chall. 1995. Readability Re- visited: The New Dale-Chall Readability Formula. Brookline Books, Cambridge.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Mining a Lexicon of Technical Terms and Lay Equivalents", |
| "authors": [ |
| { |
| "first": "Noemie", |
| "middle": [], |
| "last": "Elhadad", |
| "suffix": "" |
| }, |
| { |
| "first": "Komal", |
| "middle": [], |
| "last": "Sutaria", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the Workshop on BioNLP 2007: Biological, Translational, and Clinical Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "49--56", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Noemie Elhadad and Komal Sutaria. 2007. Mining a Lexicon of Technical Terms and Lay Equivalents. In Proceedings of the Workshop on BioNLP 2007: Bi- ological, Translational, and Clinical Language Pro- cessing, BioNLP 2007, pages 49-56.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "A Comparison of Features for Automatic Readability Assessment", |
| "authors": [ |
| { |
| "first": "Lijun", |
| "middle": [], |
| "last": "Feng", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Jansche", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Huenerfauth", |
| "suffix": "" |
| }, |
| { |
| "first": "No\u00e9mie", |
| "middle": [], |
| "last": "Elhadad", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 23rd International Conference on Computational Linguistics: Posters, COLING 2010", |
| "volume": "", |
| "issue": "", |
| "pages": "276--284", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lijun Feng, Martin Jansche, Matt Huenerfauth, and No\u00e9mie Elhadad. 2010. A Comparison of Fea- tures for Automatic Readability Assessment. In Proceedings of the 23rd International Conference on Computational Linguistics: Posters, COLING 2010, pages 276-284.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Do NLP and Machine Learning Improve Traditional Readability Formulas?", |
| "authors": [ |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Fran\u00e7ois", |
| "suffix": "" |
| }, |
| { |
| "first": "Eleni", |
| "middle": [], |
| "last": "Miltsakaki", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the First Workshop on Predicting and Improving Text Readability for Target Reader Populations, PITR 2012", |
| "volume": "", |
| "issue": "", |
| "pages": "49--57", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas Fran\u00e7ois and Eleni Miltsakaki. 2012. Do NLP and Machine Learning Improve Traditional Read- ability Formulas? In Proceedings of the First Work- shop on Predicting and Improving Text Readability for Target Reader Populations, PITR 2012, pages 49-57.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Adaptation of Flesch Readability Index on a Bilingual Text Written by the Same Author both in Italian and English Languages", |
| "authors": [ |
| { |
| "first": "Valerio", |
| "middle": [], |
| "last": "Franchina", |
| "suffix": "" |
| }, |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Vacca", |
| "suffix": "" |
| } |
| ], |
| "year": 1986, |
| "venue": "Linguaggi", |
| "volume": "3", |
| "issue": "", |
| "pages": "47--49", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Valerio Franchina and Roberto Vacca. 1986. Adapta- tion of Flesch Readability Index on a Bilingual Text Written by the Same Author both in Italian and En- glish Languages. Linguaggi, 3:47-49.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "The Technique of Clear Writing", |
| "authors": [ |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Gunning", |
| "suffix": "" |
| } |
| ], |
| "year": 1952, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Robert Gunning. 1952. The Technique of Clear Writ- ing. McGraw-Hill.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Combining Lexical and Grammatical Features to Improve Readability Measures for First and Second Language Texts", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Heilman", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevyn", |
| "middle": [], |
| "last": "Collins-Thompson", |
| "suffix": "" |
| }, |
| { |
| "first": "Jamie", |
| "middle": [], |
| "last": "Callan", |
| "suffix": "" |
| }, |
| { |
| "first": "Maxine", |
| "middle": [], |
| "last": "Eskenazi", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the Human Language Technology Conference of the North American Chapter of the Association of Computational Linguistics, HLT-NAACL 2007", |
| "volume": "", |
| "issue": "", |
| "pages": "460--467", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael Heilman, Kevyn Collins-Thompson, Jamie Callan, and Maxine Eskenazi. 2007. Combining Lexical and Grammatical Features to Improve Read- ability Measures for First and Second Language Texts. In Proceedings of the Human Language Tech- nology Conference of the North American Chap- ter of the Association of Computational Linguistics, HLT-NAACL 2007, pages 460-467.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Medida Sencillas de Lecturabilidad. Consigna", |
| "authors": [ |
| { |
| "first": "Fernandez", |
| "middle": [], |
| "last": "Huerta", |
| "suffix": "" |
| } |
| ], |
| "year": 1959, |
| "venue": "", |
| "volume": "214", |
| "issue": "", |
| "pages": "29--32", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fernandez Huerta. 1959. Medida Sencillas de Lec- turabilidad. Consigna, 214:29-32.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Application de l'Indice de Flesch a la Langue Fran\u00e7aise", |
| "authors": [ |
| { |
| "first": "Lilian", |
| "middle": [], |
| "last": "Kandel", |
| "suffix": "" |
| }, |
| { |
| "first": "Abraham", |
| "middle": [], |
| "last": "Moles", |
| "suffix": "" |
| } |
| ], |
| "year": 1958, |
| "venue": "Cahiers Etudes de Radio-Television", |
| "volume": "19", |
| "issue": "", |
| "pages": "253--274", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lilian Kandel and Abraham Moles. 1958. Application de l'Indice de Flesch a la Langue Fran\u00e7aise. Cahiers Etudes de Radio-Television, 19:253-274.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Derivation of New Readability Formulas (Automated Readability Index, Fog Count and Flesch Reading Ease Formula) for Navy Enlisted Personnel", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Kincaid", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [ |
| "P" |
| ], |
| "last": "Fishburne", |
| "suffix": "Jr." |
| }, |
| { |
| "first": "Richard", |
| "middle": [ |
| "L" |
| ], |
| "last": "Rogers", |
| "suffix": "" |
| }, |
| { |
| "first": "Brad", |
| "middle": [ |
| "S" |
| ], |
| "last": "Chissom", |
| "suffix": "" |
| } |
| ], |
| "year": 1975, |
| "venue": "Research Branch Report", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Kincaid, Robert P. Fishburne Jr., Richard L. Rogers, and Brad S. Chissom. 1975. Deriva- tion of New Readability Formulas (Automated Read- ability Index, Fog Count and Flesch Reading Ease Formula) for Navy Enlisted Personnel. Research Branch Report, Millington, TN: Chief of Naval Training.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Statistical Significance Tests for Machine Translation Evaluation", |
| "authors": [ |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the 2004 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "388--395", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philipp Koehn. 2004. Statistical Significance Tests for Machine Translation Evaluation. In Proceedings of the 2004 Conference on Empirical Methods in Natu- ral Language Processing, EMNLP 2004, pages 388- 395.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Europarl: A Parallel Corpus for Statistical Machine Translation", |
| "authors": [ |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the 10th Machine Translation Summit", |
| "volume": "", |
| "issue": "", |
| "pages": "79--86", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philipp Koehn. 2005. Europarl: A Parallel Corpus for Statistical Machine Translation. In Proceedings of the 10th Machine Translation Summit, pages 79-86.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Translationese and Its Dialects", |
| "authors": [ |
| { |
| "first": "Moshe", |
| "middle": [], |
| "last": "Koppel", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Ordan", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies, ACL 2011", |
| "volume": "", |
| "issue": "", |
| "pages": "1318--1326", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Moshe Koppel and Noam Ordan. 2011. Translationese and Its Dialects. In Proceedings of the 49th An- nual Meeting of the Association for Computational Linguistics: Human Language Technologies, ACL 2011, pages 1318-1326.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "The Stanford CoreNLP Natural Language Processing Toolkit", |
| "authors": [ |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "Mihai", |
| "middle": [], |
| "last": "Surdeanu", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Bauer", |
| "suffix": "" |
| }, |
| { |
| "first": "Jenny", |
| "middle": [], |
| "last": "Finkel", |
| "suffix": "" |
| }, |
| { |
| "first": "Steven", |
| "middle": [ |
| "J" |
| ], |
| "last": "Bethard", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "McClosky", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of 52nd Annual Meeting of the Association for Computational Linguistics: System Demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "55--60", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christopher D. Manning, Mihai Surdeanu, John Bauer, Jenny Finkel, Steven J. Bethard, and David Mc- Closky. 2014. The Stanford CoreNLP Natural Lan- guage Processing Toolkit. In Proceedings of 52nd Annual Meeting of the Association for Computa- tional Linguistics: System Demonstrations, pages 55-60.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "SMOG Grading: a New Readability Formula", |
| "authors": [ |
| { |
| "first": "Harry", |
| "middle": [], |
| "last": "McLaughlin", |
| "suffix": "" |
| } |
| ], |
| "year": 1969, |
| "venue": "Journal of Reading", |
| "volume": "12", |
| "issue": "8", |
| "pages": "639--646", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Harry McLaughlin. 1969. SMOG Grading: a New Readability Formula. Journal of Reading, 12(8):639-646.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Scikit-learn: Machine Learning in Python", |
| "authors": [ |
| { |
| "first": "Fabian", |
| "middle": [], |
| "last": "Pedregosa", |
| "suffix": "" |
| }, |
| { |
| "first": "Ga\u00ebl", |
| "middle": [], |
| "last": "Varoquaux", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandre", |
| "middle": [], |
| "last": "Gramfort", |
| "suffix": "" |
| }, |
| { |
| "first": "Vincent", |
| "middle": [], |
| "last": "Michel", |
| "suffix": "" |
| }, |
| { |
| "first": "Bertrand", |
| "middle": [], |
| "last": "Thirion", |
| "suffix": "" |
| }, |
| { |
| "first": "Olivier", |
| "middle": [], |
| "last": "Grisel", |
| "suffix": "" |
| }, |
| { |
| "first": "Mathieu", |
| "middle": [], |
| "last": "Blondel", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Prettenhofer", |
| "suffix": "" |
| }, |
| { |
| "first": "Ron", |
| "middle": [], |
| "last": "Weiss", |
| "suffix": "" |
| }, |
| { |
| "first": "Vincent", |
| "middle": [], |
| "last": "Dubourg", |
| "suffix": "" |
| }, |
| { |
| "first": "Jake", |
| "middle": [], |
| "last": "Vanderplas", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandre", |
| "middle": [], |
| "last": "Passos", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Cournapeau", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthieu", |
| "middle": [], |
| "last": "Brucher", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthieu", |
| "middle": [], |
| "last": "Perrot", |
| "suffix": "" |
| }, |
| { |
| "first": "\u00c9douard", |
| "middle": [], |
| "last": "Duchesnay", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "12", |
| "issue": "", |
| "pages": "2825--2830", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fabian Pedregosa, Ga\u00ebl Varoquaux, Alexandre Gram- fort, Vincent Michel, Bertrand Thirion, Olivier Grisel, Mathieu Blondel, Peter Prettenhofer, Ron Weiss, Vincent Dubourg, Jake Vanderplas, Alexan- dre Passos, David Cournapeau, Matthieu Brucher, Matthieu Perrot, and\u00c9douard Duchesnay. 2011. Scikit-learn: Machine Learning in Python. Journal of Machine Learning Research, 12:2825-2830.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Politics, Media and Translation -Exploring Synergies", |
| "authors": [ |
| { |
| "first": "Christina", |
| "middle": [], |
| "last": "Sch\u00e4ffner", |
| "suffix": "" |
| }, |
| { |
| "first": "Susan", |
| "middle": [], |
| "last": "Bassnett", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Political Discourse, Media and Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "1--29", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christina Sch\u00e4ffner and Susan Bassnett. 2010. Pol- itics, Media and Translation -Exploring Syner- gies. In Political Discourse, Media and Transla- tion, pages 1-29. Newcastle upon Tyne: Cambridge Scholars Publishing.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Handbook of Parametric and Nonparametric Statistical Procedures", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [ |
| "J" |
| ], |
| "last": "Sheskin", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David J. Sheskin. 2003. Handbook of Parametric and Nonparametric Statistical Procedures. CRC Press.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Automated Readability Index. Wright-Patterson Air Force Base", |
| "authors": [ |
| { |
| "first": "Edgar", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [ |
| "J" |
| ], |
| "last": "Senter", |
| "suffix": "" |
| } |
| ], |
| "year": 1967, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Edgar A. Smith and R. J. Senter. 1967. Auto- mated Readability Index. Wright-Patterson Air Force Base. AMRL-TR-6620.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Statistical Machine Translation with Readability Constraints", |
| "authors": [ |
| { |
| "first": "Sara", |
| "middle": [], |
| "last": "Stymne", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00f6rg", |
| "middle": [], |
| "last": "Tiedemann", |
| "suffix": "" |
| }, |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Hardmeier", |
| "suffix": "" |
| }, |
| { |
| "first": "Joakim", |
| "middle": [], |
| "last": "Nivre", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 19th Nordic Conference on Computational Linguistics, NODALIDA 2013", |
| "volume": "", |
| "issue": "", |
| "pages": "375--386", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sara Stymne, J\u00f6rg Tiedemann, Christian Hardmeier, and Joakim Nivre. 2013. Statistical Machine Trans- lation with Readability Constraints. In Proceedings of the 19th Nordic Conference on Computational Linguistics, NODALIDA 2013, pages 375-386.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Translation and Strategies for Cross-Cultural Communication", |
| "authors": [ |
| { |
| "first": "Yifeng", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Chinese Translators Journal", |
| "volume": "33", |
| "issue": "1", |
| "pages": "16--23", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yifeng Sun. 2012. Translation and Strategies for Cross-Cultural Communication. Chinese Transla- tors Journal, 33(1):16-23.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Parallel Data, Tools and Interfaces in OPUS", |
| "authors": [ |
| { |
| "first": "J\u00f6rg", |
| "middle": [], |
| "last": "Tiedemann", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 8th International Conference on Language Resources and Evaluation, LREC 2012", |
| "volume": "", |
| "issue": "", |
| "pages": "2214--2218", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J\u00f6rg Tiedemann. 2012. Parallel Data, Tools and Inter- faces in OPUS. In Proceedings of the 8th Interna- tional Conference on Language Resources and Eval- uation, LREC 2012, pages 2214-2218.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Text Typology and Translation", |
| "authors": [], |
| "year": 1997, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anna Trosborg, editor. 1997. Text Typology and Trans- lation. Benjamins Translation Library.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Source Language Markers in EUROPARL Translations", |
| "authors": [ |
| { |
| "first": "Hans", |
| "middle": [], |
| "last": "van Halteren", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of the 22nd International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "937--944", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hans van Halteren. 2008. Source Language Mark- ers in EUROPARL Translations. In Proceedings of the 22nd International Conference on Computa- tional Linguistics, COLING 2008, pages 937-944.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF0": { |
| "html": null, |
| "content": "<table><tr><td>Lang.</td><td># speakers</td><td># sentences</td></tr><tr><td>EN</td><td/><td>1,262</td></tr><tr><td>SV</td><td/><td>80,171</td></tr><tr><td>NL</td><td/><td>156,836</td></tr><tr><td>DA</td><td/><td>37,045</td></tr><tr><td>FI</td><td/><td>36,768</td></tr><tr><td>DE</td><td/><td>300,672</td></tr><tr><td>ET</td><td/><td>4,284</td></tr><tr><td>MT</td><td/><td>2,790</td></tr><tr><td>PL</td><td/><td>62,479</td></tr><tr><td>FR</td><td/><td>264,460</td></tr><tr><td>LV</td><td/><td>4,652</td></tr><tr><td>SL</td><td/><td>8,576</td></tr><tr><td>HU</td><td/><td>23,129</td></tr><tr><td>CS</td><td/><td>20,637</td></tr><tr><td>BG</td><td/><td>5,432</td></tr><tr><td>SK</td><td/><td>13,873</td></tr><tr><td>LT</td><td/><td>14,834</td></tr><tr><td>ES</td><td/><td>116,834</td></tr><tr><td>RO</td><td/><td>24,586</td></tr><tr><td>IT</td><td/><td>109,297</td></tr><tr><td>PT</td><td/><td>98,653</td></tr></table>", |
| "type_str": "table", |
| "num": null, |
| "text": "\"What exactly happens in the complex processes of recontextualisation across" |
| }, |
| "TABREF1": { |
| "html": null, |
| "content": "<table/>", |
| "type_str": "table", |
| "num": null, |
| "text": "" |
| }, |
| "TABREF4": { |
| "html": null, |
| "content": "<table><tr><td>in translation identification, suggesting that read-</td></tr><tr><td>ability features do not have enough discriminative</td></tr><tr><td>power for the prediction task 5 . Adding n-grams</td></tr><tr><td>of tokens and POS tags as features improves the</td></tr><tr><td>performance of the model, leading to 0.75 aver-</td></tr><tr><td>age F-score ([0.71, 0.78] 95% CI) in discriminat-</td></tr><tr><td>ing between English sentences and translations.</td></tr></table>", |
| "type_str": "table", |
| "num": null, |
| "text": "Classification results and 95% bootstrapped confidence intervals for a 2-class prediction problem -original vs. translated text -using readability features." |
| } |
| } |
| } |
| } |