| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T07:43:45.672402Z" |
| }, |
| "title": "BLEU, METEOR, BERTScore: Evaluation of Metrics Performance in Assessing Critical Translation Errors in Sentiment-oriented Text", |
| "authors": [ |
| { |
| "first": "Hadeel", |
| "middle": [], |
| "last": "Saadany", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Wolverhampton", |
| "location": { |
| "country": "UK" |
| } |
| }, |
| "email": "h.a.saadany@wlv.ac.uk" |
| }, |
| { |
| "first": "Constantin", |
| "middle": [], |
| "last": "Or\u0103san", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Surrey", |
| "location": { |
| "country": "UK" |
| } |
| }, |
| "email": "c.orasan@surrey.ac.uk" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Social media companies as well as authorities make extensive use of artificial intelligence (AI) tools to monitor postings of hate speech, celebrations of violence or profanity. Since AI software requires massive volumes of data to train computers, Machine Translation (MT) of the online content is commonly used to process posts written in several languages and hence augment the data needed for training. However, MT mistakes are a regular occurrence when translating sentiment-oriented user-generated content (UGC), especially when a low-resource language is involved. The adequacy of the whole process relies on the assumption that the evaluation metrics used give a reliable indication of the quality of the translation. In this paper, we assess the ability of automatic quality metrics to detect critical machine translation errors which can cause serious misunderstanding of the affect message. We compare the performance of three canonical metrics on meaningless translations where the semantic content is seriously impaired as compared to meaningful translations with a critical error which exclusively distorts the sentiment of the source text. We conclude that there is a need for fine-tuning of automatic metrics to make them more robust in detecting sentiment critical errors.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Social media companies as well as authorities make extensive use of artificial intelligence (AI) tools to monitor postings of hate speech, celebrations of violence or profanity. Since AI software requires massive volumes of data to train computers, Machine Translation (MT) of the online content is commonly used to process posts written in several languages and hence augment the data needed for training. However, MT mistakes are a regular occurrence when translating sentiment-oriented user-generated content (UGC), especially when a low-resource language is involved. The adequacy of the whole process relies on the assumption that the evaluation metrics used give a reliable indication of the quality of the translation. In this paper, we assess the ability of automatic quality metrics to detect critical machine translation errors which can cause serious misunderstanding of the affect message. We compare the performance of three canonical metrics on meaningless translations where the semantic content is seriously impaired as compared to meaningful translations with a critical error which exclusively distorts the sentiment of the source text. We conclude that there is a need for fine-tuning of automatic metrics to make them more robust in detecting sentiment critical errors.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Facebook has once apologised after its machine-translation service lead to an arrest of a man from the West Bank whose profile posting in his native dialect that read \"good morning\" was mistranslated as \"attack them\", and later automatically detected by authorities as an incitement to violence 3 . The main danger in this type of MT error is that it changes the author's sentiment, here from positive to a negative or rather aggressive emotion. Research on translation of sentiment by MT systems has shown that users encounter similar mistakes where the sentiment polarity of the source is flipped to its exact opposite due to a mistranslation of a contronym, a dialectical expression, or a missed negation marker, especially in translation of online content of low-resource languages [17] . In machine translation research, the reliability of MT systems is conventionally measured by automatic quality metrics such as BLEU [13] and METEOR [1] . The aim of these automatic quality metrics is to evaluate a translation hypothesis (i.e. the automatic translation) against a reference translation, which is normally produced by a human translator. Good evaluation metrics should have a high correlation with human judgement on the quality of translation. Recently some automatic metrics have achieved a significant correlation with human judgement on the WMT Metrics task datasets (see [7, 8, 12] ). However, research has reported weaker correlation with low human assessment score ranges for segment-level evaluation [20, 19] . These findings point to the challenges involved in detecting low-quality translations by automatic metrics.", |
| "cite_spans": [ |
| { |
| "start": 295, |
| "end": 296, |
| "text": "3", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 786, |
| "end": 790, |
| "text": "[17]", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 925, |
| "end": 929, |
| "text": "[13]", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 941, |
| "end": 944, |
| "text": "[1]", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 1384, |
| "end": 1387, |
| "text": "[7,", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 1388, |
| "end": 1390, |
| "text": "8,", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 1391, |
| "end": 1394, |
| "text": "12]", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 1516, |
| "end": 1520, |
| "text": "[20,", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 1521, |
| "end": 1524, |
| "text": "19]", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this work, we focus on the problem of evaluating critical translation errors that can cause serious misunderstanding of the sentiment conveyed in the source text. To illustrate this point, suppose we are evaluating the MT output \"People are dead, starving in your presence, may God forgive you\" with its reference \"People are dead, starving in your presence, may God not forgive you\" 4 . The error in the MT output is only the missing of the word not, however, this omission causes the translation to convey the exact opposite sentiment of the source. We argue that such translation errors should be considered more critical than those which produce ungrammatical or low-quality translations, but do not significantly distort the message of the source. However, as we show in this paper, automatic quality metrics fail to give a penalty to this type of critical error proportional to its gravity and may equate this hypothesis with another that also has a uni-gram mistake, but transfers the affect message (e.g People are dead, hungry in your presence, may God not forgive you).", |
| "cite_spans": [ |
| { |
| "start": 387, |
| "end": 388, |
| "text": "4", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this research we conduct an experiment with three canonical automatic quality metrics, BLEU, METEOR and BERTScore [22] . We measure the ability of each metric to penalise sentiment critical errors that severely distort the affect message as compared to translations which correctly transfer the correct sentiment as well as mistranslations that produce incomprehensible content in the target language. We first briefly present the three metrics in section 2. Then, in section 3, we explain our experiment and summarise the results. In section 4, we give our concluding remarks.", |
| "cite_spans": [ |
| { |
| "start": 117, |
| "end": 121, |
| "text": "[22]", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The standard metric for assessing empirical improvement of MT systems is BLEU. Simply stated, the objective of BLEU is to compare n-grams of the candidate translation with n-grams of the reference translation and count the number of matches; the more the matches, the better the candidate translation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The final score is a modified n-gram precision multiplied by a brevity penalty to account for both frequency and adequacy. Due to its restrictive exact matching to the reference, BLEU does not accommodate for importance n-gram weighting which may be essential in assessing a sentiment-critical error. However, despite research evidence of its analytical limitations [9, 16] , BLEU, is still the de facto standard for MT performance evaluation because it is easy to calculate regardless of the languages involved. METEOR, on the other hand, incorporates semantic information as it evaluates translation by calculating either exact match, stem match, or synonymy match. For synonym matching, it utilises WordNet synsets [14] . More recent versions (METEOR 1.5 and METEOR++2.0) apply importance weighting by giving smaller weight to function words [3, 6] . However, the METEOR weighting scheme would not allow for a great penalty of the missing negation marker in the hypothesis of our example above. In fact, the METEOR score for Twitter's MT wrong translation is 0.91, whereas the score for the correct translation (People are dead, starving in your presence, may God not forgive you) is 0.99. The main culprit for this proportionally inaccurate scoring is the function word weighting which causes the metric to be over permissive despite the MT engine missing of a negation marker crucial to the sentiment of the source tweet.", |
| "cite_spans": [ |
| { |
| "start": 366, |
| "end": 369, |
| "text": "[9,", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 370, |
| "end": 373, |
| "text": "16]", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 718, |
| "end": 722, |
| "text": "[14]", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 845, |
| "end": 848, |
| "text": "[3,", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 849, |
| "end": 851, |
| "text": "6]", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Both METEOR and BLEU assess the quality of translation in terms of surface n-gram matching between the MT output and a human reference(s). After the introduction of pretrained contextual word models, there has been a recent trend to use large-scale models like BERT [4] for MT evaluation to incorporate semantic contextual information of tokens in comparing translation and reference segments. A number of embedding-based metrics has proven to achieve the highest performance in recent WMT shared tasks for quality metrics (e.g. [7, 8, 12] ). We take BERTScore as representative of this category. BERTScore computes a score based on a pair wise cosine similarity between the BERT contextual embeddings of the individual tokens for the hypothesis and the reference. Accordingly, a BERTScore close to 1 indicates proximity in vector space and hence a good translation. In the following section, we explain our experiment for assessing the performance of these three metrics with respect to critical translation errors that seriously distort the affect message of the source.", |
| "cite_spans": [ |
| { |
| "start": 266, |
| "end": 269, |
| "text": "[4]", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 529, |
| "end": 532, |
| "text": "[7,", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 533, |
| "end": 535, |
| "text": "8,", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 536, |
| "end": 539, |
| "text": "12]", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We measure the performance of the three metrics on two types of translated UGC data: synthetic and authentic. The synthetic dataset consists of 100 restaurant reviews extracted from the SemEval-2016 Aspect-Based Sentiment Analysis task where each review expresses mixed sentiment about a particular entity [15] . For this dataset we did not use machine translation, but we artificially modified the original texts in such a way that the original sentiment was distorted. Thus, we created hypothesis-reference pairs with changes only in sentiment-related words. The main objective of the synthetic data is to measure the sensitivity of each metric to sentiment-critical translation errors by making n-gram sentiment modifications to the hypothesis while keeping the other words intact. We made four types of sentiment modifications manually. For example, for the source review 'But the staff was so horrible to us', we made the following modifications:", |
| "cite_spans": [ |
| { |
| "start": 306, |
| "end": 310, |
| "text": "[15]", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset Compiling", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "-One Non-Critical Error: a uni-gram change that does not affect the sentiment ('But the staff was so horrible to him') -One Critical Error: a uni-gram change that produced the opposite sentiment ('But the staff was so nice to us') -Two Errors: a two-words change with one critical and one non-critical error ('But the staff was so nice to him') -Nonsense: a three-words change that produced a meaningless translation ('But the team was so to him') The authentic dataset consisted of 1700 tweets collated from different emotiondetection and aggression-detection shared tasks ( [11, 10, 2, 21] ). The source tweets were in three languages: English (1400), Arabic (200) and Spanish (100). This dataset was translated by Twitter's MT system (Google API). The Spanish and English source tweets were translated into English, and the English tweets were translated into Romanian, Arabic, Spanish and Portuguese. Five human annotators 5 , native speakers of the respective languages, manually annotated the translations for sentiment errors. The annotation was straightforward: Yes the translation transfers the sentiment of the source (even though it can have non-sentiment related errors that do not seriously affect the overall sentiment/emotion) or No, it does not. If 'No', the annotators were asked to mark whether the mistranslation of sentiment is due to one or two linguistic errors. The linguistic error was either a missing negation marker, a mistranslation of a hashtag, an idiomatic expression or a polysemous word (table 1 shows the distribution of the datasets types used in the experiment). More details on how the errors were identified are discussed in [18] .", |
| "cite_spans": [ |
| { |
| "start": 576, |
| "end": 580, |
| "text": "[11,", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 581, |
| "end": 584, |
| "text": "10,", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 585, |
| "end": 587, |
| "text": "2,", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 588, |
| "end": 591, |
| "text": "21]", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 927, |
| "end": 928, |
| "text": "5", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 1663, |
| "end": 1667, |
| "text": "[18]", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset Compiling", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We ran the three metrics on the hypothesis/reference pairs of the synthetic dataset and the hypothesis/reference 6 for Arabic and Spanish tweets, and the source/back-translations of the English tweets of the authentic dataset (The back-translations were checked to make sure they reproduced the exact sentiment errors in the MT output). Accordingly, as shown in table 1, we evaluated 400 synthetic English hypothesis/reference pairs, 1400 English tweets translated into Romanian, Arabic, Spanish and Portuguese, and 300 Arabic and Spanish tweets translated into English. We used these datasets to calculate three measures for BLEU, METEOR and BERTScore: segment-level scores, mean segment-level scores and standard deviation for segment-level scores. Results of the experiment are explained in the next section.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset Compiling", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The average segment scores of the three metrics for the four sentiment modifications we have conducted on the hypotheses of the synthetic dataset is shown in figure 1 . As can be seen from the figure, the difference between the mean score for one critical error and one non-critical error is quite small for all the three metrics (max 3 points difference). This result essentially highlights the inability of the three metrics to distinguish between the mistranslation of a critical word that seriously distorts the affect message and the mistranslation of a non-critical word that does not affect the sentiment content (see table 2 for examples of such cases). The metrics, however, are able to distinguish low-quality translation with a highly distorted content as the average scores for the 'Nonsense' translations are far off from the other types of errors. Furthermore, the average BLEU score for one non-critical error is slightly higher than the one critical error. This is due to the fact that BLEU gauges the performance of an MT model by an indiscriminate n-gram matching, regardless of the semantic weight of each word. An error with a sentiment-critical word, therefore, is equally penalised as any other word. Also, for BERTScore the average score for one critical error is relatively high (0.85) due to what is known as the antonymy problem in contextual word embeddings [5] . Antonyms (e.g. 'great' and 'terrible') usually have similar contextual information and hence are closer in vector space. The change of one word to its exact opposite, therefore, is not adequately captured by the BERTScore metric. It can be claimed, therefore, that the embedding-based metric would generally struggle with hypotheses with only a uni-gram sentiment-critical error that flips the source sentiment to its opposite polarity. Figure 2 shows a similar problem for the authentic English data. For ME-TEOR, a translation that transfers the affect message has a similar average score as translations that have one or two linguistic errors that seriously distort the sentiment of the source. Note that in the authentic 'No Error' dataset, the hypothesis correctly transfers the main content but may have non-sentiment errors and hence METEOR scores may be lower for some hypotheses. However, the METEOR performance casts doubt on its ability to distinguish between a translation that can transmit the sentiment content despite other errors and another translation that has a critical error of the sentiment which would be unacceptable by human standards. By contrast, the average scores of the BERTScore metric correlate consistently with the degradation of the sentiment transfer in this authentic dataset. However, for the second language arc where Arabic/Spanish are the source languages, the difference between METEOR and BERTScore average scores for segments with no sentiment error and those with critical errors is relatively small (7 and 8 points, respectively as shown in figure3).", |
| "cite_spans": [ |
| { |
| "start": 1385, |
| "end": 1388, |
| "text": "[5]", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 158, |
| "end": 166, |
| "text": "figure 1", |
| "ref_id": null |
| }, |
| { |
| "start": 625, |
| "end": 632, |
| "text": "table 2", |
| "ref_id": "TABREF1" |
| }, |
| { |
| "start": 1828, |
| "end": 1836, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Finally, figure 4 shows the normalised standard deviation of the segment-level scores for the three metrics on the different datasets. The scores of the three metrics display the highest variation with the authentic dataset with one sentiment error and BERTScore displays a great variance with two sentiment errors in the same dataset. This indicates that translations with sentiment critical errors do not consistently receive low scores by the three metrics. Similarly, both the ME-TEOR and BLEU metrics have a relatively higher deviation in segment-level scores for the synthetic dataset with one critical error. Therefore, hypotheses that are exact match to the reference but have only one critical error causing a misinterpretation of the affect message are not consistently penalised by the two metrics (see table 2 for examples of metric scores for references/hypotheses of the two datasets). ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 9, |
| "end": 17, |
| "text": "figure 4", |
| "ref_id": null |
| }, |
| { |
| "start": 809, |
| "end": 821, |
| "text": "(see table 2", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "In this research, we conducted an experiment with three canonical automatic quality metrics to evaluate their ability to penalise a critical translation error that seriously distorts the affect message of the source text. The average segmentlevel scores for the three metrics showed that sentiment-critical and non-critical errors are not appropriately distinguishable especially in our synthetic dataset. This shows that in scenarios where the MT output is an exact match to the reference except for one sentiment-pivotal word, the automatic quality metric becomes less sensitive to the mistranslation error. Similarly, with the authentic datasets, the average scores for METEOR showed that mistranslations with one or two critical errors are not appropriately penalised. Moreover, with both the authentic and synthetic data, the relatively high inconsistency of segment-level scores for hypotheses with one or two sentiment-critical errors suggests that a distortion of the sentiment content may misleadingly receive high scores by any of the three metrics. The results of the experiment call attention to the need for a sentiment-targeted evaluation measure that can adequately assess this type of critical translation errors that have can serious consequences in determining the sentiment stance of the author. Our future work will focus on fine-tuning the quality metrics to capture sentiment-critical lexicon to improve its performance with sentiment-oriented text.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "4" |
| }, |
| { |
| "text": "https://www.theguardian.com/technology/2017/oct/24/ facebook-palestine-israel-translates-good-morning-attack-them-arrest", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "The hypothesis is the mistranslation of Twitter's Translate tab for an Arabic tweet https://twitter.com/ZPNyOawCRVTNBxu/status/878496659793170432, accessed 26 June 2021.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "The annotators were computational linguists working on MT research.6 Reference translations were created by the two annotators native speakers of Arabic and Spanish.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Scores in figures are standardised from 0 to 100 for easier display.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "Part of the research done by Hadeel Saadany was carried out in the context of the TranSent project at the University of Surrey.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "METEOR: An automatic metric for MT evaluation with improved correlation with human judgments", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Banerjee", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Lavie", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the acl workshop on intrinsic and", |
| "volume": "", |
| "issue": "", |
| "pages": "65--72", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Banerjee, S., Lavie, A.: METEOR: An automatic metric for MT evaluation with improved correlation with human judgments. In: Proceedings of the acl workshop on intrinsic and extrinsic evaluation measures for machine translation and/or sum- marization. pp. 65-72 (2005)", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Semeval-2019 task 5: Multilingual detection of hate speech against immigrants and women in twitter", |
| "authors": [ |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Basile", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Bosco", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Fersini", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Debora", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Patti", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [ |
| "M R" |
| ], |
| "last": "Pardo", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Rosso", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Sanguinetti", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "13th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "54--63", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Basile, V., Bosco, C., Fersini, E., Debora, N., Patti, V., Pardo, F.M.R., Rosso, P., Sanguinetti, M., et al.: Semeval-2019 task 5: Multilingual detection of hate speech against immigrants and women in twitter. In: 13th International Workshop on Semantic Evaluation. pp. 54-63. Association for Computational Linguistics (2019)", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Meteor universal: Language specific translation evaluation for any target language", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Denkowski", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Lavie", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the ninth workshop on statistical machine translation", |
| "volume": "", |
| "issue": "", |
| "pages": "376--380", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Denkowski, M., Lavie, A.: Meteor universal: Language specific translation evalua- tion for any target language. In: Proceedings of the ninth workshop on statistical machine translation. pp. 376-380 (2014)", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "W" |
| ], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1810.04805" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: Bert: Pre-training of deep bidirec- tional transformers for language understanding. arXiv preprint arXiv:1810.04805 (2018)", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Unraveling antonym's word vectors through a siamese-like network", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Etcheverry", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Wonsever", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "3297--3307", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Etcheverry, M., Wonsever, D.: Unraveling antonym's word vectors through a siamese-like network. In: Proceedings of the 57th Annual Meeting of the Asso- ciation for Computational Linguistics. pp. 3297-3307 (2019)", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Meteor++ 2.0: Adopt syntactic level paraphrase knowledge into machine translation evaluation", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Fourth Conference on Machine Translation", |
| "volume": "2", |
| "issue": "", |
| "pages": "501--506", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Guo, Y., Hu, J.: Meteor++ 2.0: Adopt syntactic level paraphrase knowledge into machine translation evaluation. In: Proceedings of the Fourth Conference on Ma- chine Translation (Volume 2: Shared Task Papers, Day 1). pp. 501-506 (2019)", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Openkiwi: An open source framework for quality estimation", |
| "authors": [ |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Kepler", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Tr\u00e9nous", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Treviso", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Vera", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [ |
| "F" |
| ], |
| "last": "Martins", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1902.08646" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kepler, F., Tr\u00e9nous, J., Treviso, M., Vera, M., Martins, A.F.: Openkiwi: An open source framework for quality estimation. arXiv preprint arXiv:1902.08646 (2019)", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Extended study on using pretrained language models and YiSi-1 for machine translation evaluation", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [ |
| "K" |
| ], |
| "last": "Lo", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Fifth Conference on Machine Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "895--902", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lo, C.k.: Extended study on using pretrained language models and YiSi-1 for machine translation evaluation. In: Proceedings of the Fifth Conference on Machine Translation. pp. 895-902 (2020)", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Tangled up in BLEU: Reevaluating the Evaluation of Automatic Machine Translation Evaluation Metrics", |
| "authors": [ |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Mathur", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Baldwin", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Cohn", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2006.06264" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mathur, N., Baldwin, T., Cohn, T.: Tangled up in BLEU: Reevaluating the Evaluation of Automatic Machine Translation Evaluation Metrics. arXiv preprint arXiv:2006.06264 (2020)", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Understanding emotions: A dataset of tweets to study interactions between affect categories", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Mohammad", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Kiritchenko", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mohammad, S., Kiritchenko, S.: Understanding emotions: A dataset of tweets to study interactions between affect categories. In: Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018) (2018)", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Wassa-2017 shared task on emotion intensity", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [ |
| "M" |
| ], |
| "last": "Mohammad", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Bravo-Marquez", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1708.03700" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mohammad, S.M., Bravo-Marquez, F.: Wassa-2017 shared task on emotion intensity. arXiv preprint arXiv:1708.03700 (2017)", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Mee: An automatic metric for evaluation using embeddings for machine translation", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Mukherjee", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Ala", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Shrivastava", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [ |
| "M" |
| ], |
| "last": "Sharma", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "2020 IEEE 7th International Conference on Data Science and Advanced Analytics (DSAA)", |
| "volume": "", |
| "issue": "", |
| "pages": "292--299", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mukherjee, A., Ala, H., Shrivastava, M., Sharma, D.M.: Mee: An automatic metric for evaluation using embeddings for machine translation. In: 2020 IEEE 7th International Conference on Data Science and Advanced Analytics (DSAA). pp. 292-299. IEEE (2020)", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Bleu: a method for automatic evaluation of machine translation", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Papineni", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Roukos", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Ward", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [ |
| "J" |
| ], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the 40th annual meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "311--318", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Papineni, K., Roukos, S., Ward, T., Zhu, W.J.: Bleu: a method for automatic evaluation of machine translation. In: Proceedings of the 40th annual meeting of the Association for Computational Linguistics. pp. 311-318 (2002)", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Wordnet:: Similarity-measuring the relatedness of concepts", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Pedersen", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Patwardhan", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Michelizzi", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "In: AAAI", |
| "volume": "4", |
| "issue": "", |
| "pages": "25--29", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pedersen, T., Patwardhan, S., Michelizzi, J., et al.: Wordnet:: Similarity-measuring the relatedness of concepts. In: AAAI. vol. 4, pp. 25-29 (2004)", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Semeval-2016 task 5: Aspect based sentiment analysis", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Pontiki", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Galanis", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Papageorgiou", |
| "suffix": "" |
| }, |
| { |
| "first": "I", |
| "middle": [], |
| "last": "Androutsopoulos", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Manandhar", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Al-Smadi", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Al-Ayyoub", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Qin", |
| "suffix": "" |
| }, |
| { |
| "first": "O", |
| "middle": [], |
| "last": "De Clercq", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "19--30", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pontiki, M., Galanis, D., Papageorgiou, H., Androutsopoulos, I., Manandhar, S., Al-Smadi, M., Al-Ayyoub, M., Zhao, Y., Qin, B., De Clercq, O., et al.: Semeval-2016 task 5: Aspect based sentiment analysis. In: International workshop on semantic evaluation. pp. 19-30 (2016)", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "A Structured Review of the Validity of BLEU", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Reiter", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Computational Linguistics", |
| "volume": "44", |
| "issue": "3", |
| "pages": "393--401", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Reiter, E.: A Structured Review of the Validity of BLEU. Computational Linguistics 44(3), 393-401 (2018)", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Is it great or terrible? preserving sentiment in neural machine translation of arabic reviews", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Saadany", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Orasan", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Fifth Arabic Natural Language Processing Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "24--37", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Saadany, H., Orasan, C.: Is it great or terrible? preserving sentiment in neural machine translation of arabic reviews. In: Proceedings of the Fifth Arabic Natural Language Processing Workshop. pp. 24-37 (2020)", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Challenges in Translation of Emotions in Multilingual User-Generated Content", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Saadany", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Orasan", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [ |
| "C" |
| ], |
| "last": "Quintana", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Do Carmo", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Zilio", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Twitter as a Case Study", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2106.10719" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Saadany, H., Orasan, C., Quintana, R.C., do Carmo, F., Zilio, L.: Challenges in Translation of Emotions in Multilingual User-Generated Content: Twitter as a Case Study. arXiv preprint arXiv:2106.10719 (2021)", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Is this translation error critical?: Classification-based human and automatic machine translation evaluation focusing on critical errors", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Sudoh", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Takahashi", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Nakamura", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the Workshop on Human Evaluation of NLP Systems (HumEval)", |
| "volume": "", |
| "issue": "", |
| "pages": "46--55", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sudoh, K., Takahashi, K., Nakamura, S.: Is this translation error critical?: Classification-based human and automatic machine translation evaluation focusing on critical errors. In: Proceedings of the Workshop on Human Evaluation of NLP Systems (HumEval). pp. 46-55 (2021)", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Automatic machine translation evaluation using source language inputs and cross-lingual language model", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Takahashi", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Sudoh", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Nakamura", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "3553--3558", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Takahashi, K., Sudoh, K., Nakamura, S.: Automatic machine translation evaluation using source language inputs and cross-lingual language model. In: Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics. pp. 3553-3558 (2020)", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Semeval-2020 task 12: Multilingual offensive language identification in social media (offenseval 2020)", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Zampieri", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Nakov", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Rosenthal", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Atanasova", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Karadzhov", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Mubarak", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Derczynski", |
| "suffix": "" |
| }, |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Pitenis", |
| "suffix": "" |
| }, |
| { |
| "first": "\u00c7", |
| "middle": [], |
| "last": "\u00c7\u00f6ltekin", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2006.07235" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zampieri, M., Nakov, P., Rosenthal, S., Atanasova, P., Karadzhov, G., Mubarak, H., Derczynski, L., Pitenis, Z., \u00c7\u00f6ltekin, \u00c7.: Semeval-2020 task 12: Multilingual offensive language identification in social media (offenseval 2020). arXiv preprint arXiv:2006.07235 (2020)", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Bertscore: Evaluating text generation with Bert", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Kishore", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [ |
| "Q" |
| ], |
| "last": "Weinberger", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Artzi", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1904.09675" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhang, T., Kishore, V., Wu, F., Weinberger, K.Q., Artzi, Y.: Bertscore: Evaluating text generation with Bert. arXiv preprint arXiv:1904.09675 (2019)", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "num": null, |
| "text": "Mean Scores for Synthetic Data 7 Fig. 2: Mean Scores for Authentic Data (en) Mean Scores for Authentic Data (ar/sp) Normalised Standard Deviation", |
| "type_str": "figure" |
| }, |
| "TABREF0": { |
| "type_str": "table", |
| "num": null, |
| "content": "<table><tr><td>Dataset</td><td colspan=\"4\">No Error One Error Two Errors Nonsense</td></tr><tr><td>Synthetic En to En</td><td/><td>200</td><td>100</td><td>100</td></tr><tr><td>Total</td><td/><td>400</td><td/></tr><tr><td>Authentic En to Sp/Ar/Pt/Ro</td><td>854</td><td>404</td><td>142</td></tr><tr><td>Authentic Sp/Ar to En</td><td>150</td><td>150</td><td/></tr><tr><td>Total</td><td/><td>1700</td><td/></tr></table>", |
| "html": null, |
| "text": "Distribution of Translation of Sentiment Errors for the Datasets" |
| }, |
| "TABREF1": { |
| "type_str": "table", |
| "num": null, |
| "content": "<table><tr><td>Synthetic Data</td><td/><td>Metric</td><td/><td/></tr><tr><td/><td/><td colspan=\"3\">BLEU METEOR BERTScore</td></tr><tr><td>Ref</td><td>Their pizza is the best, if you like thin crusted pizza.</td><td>1.0</td><td>1.0</td><td>1.0</td></tr><tr><td>Non-critical Error</td><td>Their pizza is the best, if you like thin layer pizza.</td><td colspan=\"2\">0.76 0.50</td><td>0.90</td></tr><tr><td>Critical Error</td><td>Their pizza is the worst, if you like thin crusted pizza.</td><td colspan=\"2\">0.73 0.50</td><td>0.86</td></tr><tr><td>Authentic Data</td><td/><td/><td/><td/></tr><tr><td>Ref</td><td>What is this amount of happiness, I don't understand!</td><td>1.0</td><td>1.0</td><td>1.0</td></tr><tr><td>One Error</td><td>What is this amount of anger , I don't get it!</td><td colspan=\"2\">0.65 0.47</td><td>0.89</td></tr><tr><td>Ref</td><td>Sweetie like clouds, always fill me with joy.</td><td>1.0</td><td>1.0</td><td>1.0</td></tr><tr><td>No Error</td><td>My love is like clouds, always fill me with joy.</td><td colspan=\"2\">0.65 0.44</td><td>0.52</td></tr></table>", |
| "html": null, |
| "text": "Examples of Metric Scores for Different Error Types" |
| } |
| } |
| } |
| } |