| { |
| "paper_id": "S13-1020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:43:01.823917Z" |
| }, |
| "title": "UPC-CORE: What Can Machine Translation Evaluation Metrics and Wikipedia Do for Estimating Semantic Textual Similarity?", |
| "authors": [ |
| { |
| "first": "Alberto", |
| "middle": [], |
| "last": "Barr\u00f3n-Cede\u00f1o", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Universitat Polit\u00e8cnica de Catalunya Jordi Girona", |
| "location": { |
| "addrLine": "Salgado 1-3", |
| "postCode": "08034", |
| "settlement": "Barcelona", |
| "country": "Spain" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Llu\u00eds", |
| "middle": [], |
| "last": "M\u00e0rquez", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Universitat Polit\u00e8cnica de Catalunya Jordi Girona", |
| "location": { |
| "addrLine": "Salgado 1-3", |
| "postCode": "08034", |
| "settlement": "Barcelona", |
| "country": "Spain" |
| } |
| }, |
| "email": "lluism@lsi.upc.edu" |
| }, |
| { |
| "first": "Maria", |
| "middle": [], |
| "last": "Fuentes", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Universitat Polit\u00e8cnica de Catalunya Jordi Girona", |
| "location": { |
| "addrLine": "Salgado 1-3", |
| "postCode": "08034", |
| "settlement": "Barcelona", |
| "country": "Spain" |
| } |
| }, |
| "email": "mfuentes@lsi.upc.edu" |
| }, |
| { |
| "first": "Horacio", |
| "middle": [], |
| "last": "Rodr\u00edguez", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Universitat Polit\u00e8cnica de Catalunya Jordi Girona", |
| "location": { |
| "addrLine": "Salgado 1-3", |
| "postCode": "08034", |
| "settlement": "Barcelona", |
| "country": "Spain" |
| } |
| }, |
| "email": "horacio@lsi.upc.edu" |
| }, |
| { |
| "first": "Jordi", |
| "middle": [], |
| "last": "Turmo", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Universitat Polit\u00e8cnica de Catalunya Jordi Girona", |
| "location": { |
| "addrLine": "Salgado 1-3", |
| "postCode": "08034", |
| "settlement": "Barcelona", |
| "country": "Spain" |
| } |
| }, |
| "email": "turmo@lsi.upc.edu" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "In this paper we discuss our participation to the 2013 Semeval Semantic Textual Similarity task. Our core features include (i) a set of metrics borrowed from automatic machine translation, originally intended to evaluate automatic against reference translations and (ii) an instance of explicit semantic analysis, built upon opening paragraphs of Wikipedia 2010 articles. Our similarity estimator relies on a support vector regressor with RBF kernel. Our best approach required 13 machine translation metrics + explicit semantic analysis and ranked 65 in the competition. Our postcompetition analysis shows that the features have a good expression level, but overfitting and-mainly-normalization issues caused our correlation values to decrease.", |
| "pdf_parse": { |
| "paper_id": "S13-1020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "In this paper we discuss our participation to the 2013 Semeval Semantic Textual Similarity task. Our core features include (i) a set of metrics borrowed from automatic machine translation, originally intended to evaluate automatic against reference translations and (ii) an instance of explicit semantic analysis, built upon opening paragraphs of Wikipedia 2010 articles. Our similarity estimator relies on a support vector regressor with RBF kernel. Our best approach required 13 machine translation metrics + explicit semantic analysis and ranked 65 in the competition. Our postcompetition analysis shows that the features have a good expression level, but overfitting and-mainly-normalization issues caused our correlation values to decrease.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Our participation to the 2013 Semantic Textual Similarity task (STS) (Agirre et al., 2013) 1 was focused on the CORE problem: GIVEN TWO SENTENCES, s 1 AND s 2 , QUANTIFIABLY INFORM ON HOW SIMI-LAR s 1 AND s 2 ARE. We considered real-valued features from four different sources: (i) a set of linguistic measures computed with the Asiya Toolkit for Automatic MT Evaluation (Gim\u00e9nez and M\u00e0rquez, 2010b) , (ii) an instance of explicit semantic analysis (Gabrilovich and Markovitch, 2007) , built on top of Wikipedia articles, (iii) a dataset predictor, and (iv) a subset of the features available in Takelab's Semantic Text Similarity system (\u0160ari\u0107 et al., 2012 ).", |
| "cite_spans": [ |
| { |
| "start": 69, |
| "end": 90, |
| "text": "(Agirre et al., 2013)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 371, |
| "end": 399, |
| "text": "(Gim\u00e9nez and M\u00e0rquez, 2010b)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 449, |
| "end": 483, |
| "text": "(Gabrilovich and Markovitch, 2007)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 638, |
| "end": 657, |
| "text": "(\u0160ari\u0107 et al., 2012", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "1 http://ixa2.si.ehu.es/sts/ Our approaches obtained an overall modest result compared to other participants (best position: 65 out of 89). Nevertheless, our post-competition analysis shows that the low correlation was caused mainly by a deficient data normalization strategy.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The paper distribution is as follows. Section 2 offers a brief overview of the task. Section 3 describes our approach. Section 4 discusses our experiments and obtained results. Section 5 provides conclusions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Detecting two similar text fragments is a difficult task in cases where the similarity occurs at semantic level, independently of the implied lexicon (e.g. in cases of dense paraphrasing). As a result, similarity estimation models must involve features other than surface aspects. The STS task is proposed as a challenge focused in short English texts of different nature: from automatic machine translation alternatives to human descriptions of short videos. The test partition also included texts extracted from news headlines and FrameNet-Wordnet pairs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task Overview", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The range of similarity was defined between 0 (no relation) up to 5 (semantic equivalence). The gold standard values were averaged from different human-made annotations. The expected system's output was composed of a real similarity value, together with an optional confidence level (our confidence level was set constant). Table 1 gives an overview of the development (2012 training and test) and test datasets. Note that both collections extracted from SMT data are highly biased towards the maximum similarity values (more than 75% of the instances have a similar- ity higher than 4) and include the longest instances. On the other hand, the FNWN instances are shifted towards low similarity levels (more than 60% have a similarity lower than 2).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 324, |
| "end": 331, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Task Overview", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Our similarity assessment model relies upon SVM light 's support vector regressor, with RBF kernel (Joachims, 1999 ). 2 Our model estimation procedure consisted of two steps: parameter definition and backward elimination-based feature selection. The considered features belong to four families, briefly described in the following subsections.", |
| "cite_spans": [ |
| { |
| "start": 99, |
| "end": 114, |
| "text": "(Joachims, 1999", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We consider a set of linguistic measures originally intended to evaluate the quality of automatic translation systems. These measures compute the quality of a translation by comparing it against one or several reference translations, considered as gold standard. A straightforward application of these measures to the problem at hand is to consider s 1 as the reference and s 2 as the automatic translation, or vice versa. Some of the metrics are not symmetric so we compute similarity between s 1 and s 2 in both directions and average the resulting scores. The measures are computed with the Asiya Toolkit for Automatic MT Evaluation (Gim\u00e9nez and M\u00e0rquez, 2010b) . The only pre-processing carried out was tokenization (Asiya performs additional inbox pre-processing operations, though). We consid-ered a sample from three similarity families, which was proposed in (Gim\u00e9nez and M\u00e0rquez, 2010a) as a varied and robust metric set, showing good correlation with human assessments. 3", |
| "cite_spans": [ |
| { |
| "start": 636, |
| "end": 664, |
| "text": "(Gim\u00e9nez and M\u00e0rquez, 2010b)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 867, |
| "end": 895, |
| "text": "(Gim\u00e9nez and M\u00e0rquez, 2010a)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Machine Translation Evaluation Metrics", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Lexical Similarity Two metrics of Translation Error Rate (Snover et al., 2006 ) (i.e. the estimated human effort to convert s 1 into s 2 ): -TER and -TER pA . Two measures of lexical precision: BLEU (Papineni et al., 2002) and NIST (Doddington, 2002) . One measure of lexical recall: ROUGE W (Lin and Och, 2004) . Finally, four variants of METEOR (Banerjee and Lavie, 2005) (exact, stemming, synonyms, and paraphrasing), a lexical metric accounting for F -Measure.", |
| "cite_spans": [ |
| { |
| "start": 57, |
| "end": 77, |
| "text": "(Snover et al., 2006", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 199, |
| "end": 222, |
| "text": "(Papineni et al., 2002)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 227, |
| "end": 250, |
| "text": "NIST (Doddington, 2002)", |
| "ref_id": null |
| }, |
| { |
| "start": 292, |
| "end": 311, |
| "text": "(Lin and Och, 2004)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Machine Translation Evaluation Metrics", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Syntactic Similarity Three metrics that estimate the similarity of the sentences over dependency parse trees (Liu and Gildea, 2005) : DP-HWCMi c -4 for grammatical categories chains, DP-HWCMi r -4 over grammatical relations, and DP-O r (\u22c6) over words ruled by non-terminal nodes. Also, one measure that estimates the similarity over constituent parse trees: CP-STM 4 (Liu and Gildea, 2005) . ", |
| "cite_spans": [ |
| { |
| "start": 109, |
| "end": 131, |
| "text": "(Liu and Gildea, 2005)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 367, |
| "end": 389, |
| "text": "(Liu and Gildea, 2005)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Machine Translation Evaluation Metrics", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We built an instance of Explicit Semantic Analysis (ESA) (Gabrilovich and Markovitch, 2007) with the first paragraph of 100k Wikipedia articles (dump from 2010). Pre-processing consisted of tokenization and lemmatization.", |
| "cite_spans": [ |
| { |
| "start": 57, |
| "end": 91, |
| "text": "(Gabrilovich and Markovitch, 2007)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Explicit Semantic Analysis", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Given the similarity shifts in the different datasets (cf. Table 1), we tried to predict what dataset an instance belonged to on the basis of its vocabulary. We built binary maxent classifiers for each dataset in the development set, resulting in five dataset likelihood features: dMSRpar, dSMTeuroparl, dMSRvid, dOnWN, and dSMTnews. 4 Pre-processing consisted of tokenization and lemmatization.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset Prediction", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "We considered the features included in the Takelab Semantic Text Similarity system (\u0160ari\u0107 et al., 2012), one of the top-systems in last year competition. This system is used as a black box. The resulting features are named tklab n, where n = [1, 21].", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baseline", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "Our runs departed from three increasing subsets of features: AE machine translation evaluation metrics and explicit semantic analysis, AED the previous set plus dataset prediction, and AED T the previous set plus Takelab's baseline features (cf. Table 3). We performed a feature normalization, which relied on the different feature's distribution over the entire dataset. Firstly, features were bounded in the range \u00b5\u00b13 * \u03c3 2 in order to reduce the potentially negative impact of outliers. Secondly, we normalized according to the z-score (Nardo et al., 2008, pp. 28, 84) ; i.e. x = (x \u2212 \u00b5)/\u03c3. As a result, each realvalued feature distribution in the dataset has \u00b5 = 0 and \u03c3 = 1. During the model tuning stage we tried with other numerous normalization options: normalizing each dataset independently, together with the training set, and without normalization at all. Normalizing according to the entire dev-test dataset led to the best results ", |
| "cite_spans": [ |
| { |
| "start": 539, |
| "end": 571, |
| "text": "(Nardo et al., 2008, pp. 28, 84)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baseline", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "Section 4.1 describes our model tuning strategy. Sections 4.2 and 4.3 discuss the official and postcompetition results.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments and Results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We used only the dev-train partition (2012 training) for tuning. By means of a 10-fold cross validation process, we defined the trade-off (c), gamma (\u03b3), and tube width (\u01eb) parameters for the regressor and performed a backward-elimination feature selection process (Witten and Frank, 2005, p. 294) , independently for the three experiments.", |
| "cite_spans": [ |
| { |
| "start": 265, |
| "end": 297, |
| "text": "(Witten and Frank, 2005, p. 294)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Tuning", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "The results for the cross-validation process are summarized in Table 2 . The three runs allow for correlations higher than 0.8. On the one hand, the best regressor parameters obtain better results as more features are considered, still with very small differences. On the other hand, the low correlation increase after the feature selection step shows that a few features are indeed irrelevant.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 63, |
| "end": 70, |
| "text": "Table 2", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Model Tuning", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "A summary of the features considered in each experiment (also after feature selection) is displayed in Table 3 . The correlation obtained over the dev-test partition are corr AE = 0.7269, corr AED = 0.7638, and corr AED T = 0.8044 -it would have appeared in the top-10 ranking of the 2012 competition.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 103, |
| "end": 110, |
| "text": "Table 3", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Model Tuning", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We trained three new regressors with the features considered relevant by the tuning process, but using the entire development dataset. The test 2013 partition was normalized again by means of z-score, considering the means and standard deviations of the entire test dataset. Table 4 displays the official results. Our best approach -AE-, was positioned in rank 65. The worst results of run AED can be explained by the difference in the nature of the test with respect to the development dataset. AED T obtains worse results than AE on the headlines and SMT datasets. The reason behind this behavior can be in the difference of vocabularies with respect to that stored in the Takelab system (it includes only the vocabulary of the development partition). This could be the same reason behind the drop in performance with respect to the results previously obtained on the dev-test partition (cf. Section 4.1).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 275, |
| "end": 282, |
| "text": "Table 4", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Official Results", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Our analysis of the official results showed the main issue was normalization. Thus, we performed a manifold of new experiments, using the same configuration as in run AE, but applying other normalization strategies: (a) z-score normalization, but ignoring the FNWN dataset (given its shift through low values); (b) z-score normalization, but considering independent means and standard deviations for each test dataset; and (c) without normalizing any of dataset (including the regressor one). Table 5 includes the results. (a) makes evident that the instances in FNWN represent \"anomalies\" that harm the normalized values of the rest of subsets. Run (b) shows that normalizing the test sets independently is not a good option, as the regressor is trained considering overall normalizations, which explains the correlation decrease. Run (c) is completely different: not normalizing any dataset-both in development and test-reduces the influence of the datasets to each other and allows for the best results. Indeed, this configuration would have advanced practically forty positions at competition time, locating us in rank 27.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 493, |
| "end": 500, |
| "text": "Table 5", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Post-Competition Results", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Estimating the adequate similarities over FNWN seems particularly difficult for our systems. We observe two main factors. (i) FNWN presents an important similarity shift respect to the other datasets: nearly 90% of the instances similarity is lower than 2.5 and (ii) the average lengths of s 1 and s 2 are very different: 30 vs 9 words. These characteristics made it difficult for our MT evaluation metrics to estimate proper similarity values (be normalized or not).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Post-Competition Results", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "We performed two more experiments over FNWN: training regressors with ESA as the only feature, before and after normalization. The correlation was 0.16017 and 0.3113, respectively. That is, the normalization mainly affects the MT features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Post-Competition Results", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "In this paper we discussed on our participation to the 2013 Semeval Semantic Textual Similarity task. Our approach relied mainly upon a combination of automatic machine translation evaluation metrics and explicit semantic analysis. Building an RBF support vector regressor with these features allowed us for a modest result in the competition (our best run was ranked 65 out of 89).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We also tried with linear kernels, but RBF always obtained better results.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Asiya is available at http://asiya.lsi.upc.edu. Full descriptions of the metrics are available in the Asiya Technical Manual v2.0, pp. 15-21.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We used the Stanford classifier; http://nlp. stanford.edu/software/classifier.shtml", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We would like to thank the organizers of this challenging task for their efforts.This research work was partially carried out during the tenure of an ERCIM \"Alain Bensoussan\" Fellowship. The research leading to these results received funding from the EU FP7 Programme 2007-2013 (grants 246016 and 247762). Our research work is partially supported by the Spanish research projects OpenMT-2 and SKATER (TIN2009-14675-C03, TIN2012-38584-C06-01).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "SEM 2013 Shared Task: Semantic Textual Similarity, including a Pilot on Typed-Similarity. In *SEM 2013: The Second Joint Conference on Lexical and Computational Semantics", |
| "authors": [ |
| { |
| "first": "Eneko", |
| "middle": [], |
| "last": "Agirre", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Cer", |
| "suffix": "" |
| }, |
| { |
| "first": "Mona", |
| "middle": [], |
| "last": "Diab", |
| "suffix": "" |
| }, |
| { |
| "first": "Aitor", |
| "middle": [], |
| "last": "Gonzalez-Agirre", |
| "suffix": "" |
| }, |
| { |
| "first": "Weiwei", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eneko Agirre, Daniel Cer, Mona Diab, Aitor Gonzalez- Agirre, and Weiwei Guo. 2013. *SEM 2013 Shared Task: Semantic Textual Similarity, including a Pilot on Typed-Similarity. In *SEM 2013: The Second Joint Conference on Lexical and Computational Semantics. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "METEOR: An Automatic Metric for MT Evaluation with Improved Correlation with Human Judgments", |
| "authors": [ |
| { |
| "first": "Satanjeev", |
| "middle": [], |
| "last": "Banerjee", |
| "suffix": "" |
| }, |
| { |
| "first": "Alon", |
| "middle": [], |
| "last": "Lavie", |
| "suffix": "" |
| }, |
| { |
| "first": ";", |
| "middle": [], |
| "last": "Goldstein", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "65--72", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Satanjeev Banerjee and Alon Lavie. 2005. METEOR: An Automatic Metric for MT Evaluation with Im- proved Correlation with Human Judgments. In Gold- stein et al. (Goldstein et al., 2005), pages 65-72.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Automatic Evaluation of Machine Translation Quality Using N-Gram Cooccurrence Statistics", |
| "authors": [ |
| { |
| "first": "George", |
| "middle": [], |
| "last": "Doddington", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the Second International Conference on Human Language Technology Research", |
| "volume": "", |
| "issue": "", |
| "pages": "138--145", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "George Doddington. 2002. Automatic Evaluation of Machine Translation Quality Using N-Gram Co- occurrence Statistics. In Proceedings of the Second International Conference on Human Language Tech- nology Research, pages 138-145, San Francisco, CA. Morgan Kaufmann Publishers Inc.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Computing Semantic Relatedness Using Wikipedia-based Explicit Semantic Analysis", |
| "authors": [ |
| { |
| "first": "Evgeniy", |
| "middle": [], |
| "last": "Gabrilovich", |
| "suffix": "" |
| }, |
| { |
| "first": "Shaul", |
| "middle": [], |
| "last": "Markovitch", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the 20th International Joint Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "1606--1611", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Evgeniy Gabrilovich and Shaul Markovitch. 2007. Com- puting Semantic Relatedness Using Wikipedia-based Explicit Semantic Analysis. In Proceedings of the 20th International Joint Conference on Artificial Intel- ligence, pages 1606-1611, San Francisco, CA, USA. Morgan Kaufmann Publishers Inc.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Asiya: An Open Toolkit for Automatic Machine Translation (Meta-)Evaluation", |
| "authors": [ |
| { |
| "first": "Jes\u00fas", |
| "middle": [], |
| "last": "Gim\u00e9nez", |
| "suffix": "" |
| }, |
| { |
| "first": "Llu\u00eds", |
| "middle": [], |
| "last": "M\u00e0rquez", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "The Prague Bulletin of Mathematical Linguistics", |
| "volume": "", |
| "issue": "94", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jes\u00fas Gim\u00e9nez and Llu\u00eds M\u00e0rquez. 2010a. Asiya: An Open Toolkit for Automatic Machine Translation (Meta-)Evaluation. The Prague Bulletin of Mathemat- ical Linguistics, (94).", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Linguistic Measures for Automatic Machine Translation Evaluation", |
| "authors": [ |
| { |
| "first": "Jes\u00fas", |
| "middle": [], |
| "last": "Gim\u00e9nez", |
| "suffix": "" |
| }, |
| { |
| "first": "Llu\u00eds", |
| "middle": [], |
| "last": "M\u00e0rquez", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Machine Translation", |
| "volume": "24", |
| "issue": "3-4", |
| "pages": "209--240", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jes\u00fas Gim\u00e9nez and Llu\u00eds M\u00e0rquez. 2010b. Linguistic Measures for Automatic Machine Translation Evalua- tion. Machine Translation, 24(3-4):209-240.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Proceedings of the ACL Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization. Association for Computational Linguistics", |
| "authors": [], |
| "year": 2005, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jade Goldstein, Alon Lavie, Chin-Yew Lin, and Clare Voss, editors. 2005. Proceedings of the ACL Work- shop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization. Asso- ciation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Advances in Kernel Methods -Support Vector Learning, chapter Making large-Scale SVM Learning Practical", |
| "authors": [ |
| { |
| "first": "Thorsten", |
| "middle": [], |
| "last": "Joachims", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thorsten Joachims, 1999. Advances in Kernel Methods - Support Vector Learning, chapter Making large-Scale SVM Learning Practical. MIT Press.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Automatic Evaluation of Machine Translation Quality Using Longest Common Subsequence and Skip-Bigram Statistics", |
| "authors": [ |
| { |
| "first": "Chin-Yew", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Franz Josef", |
| "middle": [], |
| "last": "Och", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the 42nd Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chin-Yew Lin and Franz Josef Och. 2004. Auto- matic Evaluation of Machine Translation Quality Us- ing Longest Common Subsequence and Skip-Bigram Statistics. In Proceedings of the 42nd Annual Meet- ing of the Association for Computational Linguistics (ACL 2002), Stroudsburg, PA. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Syntactic Features for Evaluation of Machine Translation", |
| "authors": [ |
| { |
| "first": "Ding", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Gildea", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "25--32", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ding Liu and Daniel Gildea. 2005. Syntactic Features for Evaluation of Machine Translation. In Goldstein et al. (Goldstein et al., 2005), pages 25-32.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Handbook on Constructing Composite Indicators: Methodology and User Guide", |
| "authors": [ |
| { |
| "first": "Michela", |
| "middle": [], |
| "last": "Nardo", |
| "suffix": "" |
| }, |
| { |
| "first": "Michaela", |
| "middle": [], |
| "last": "Saisana", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Saltelli", |
| "suffix": "" |
| }, |
| { |
| "first": "Stefano", |
| "middle": [], |
| "last": "Tarantola", |
| "suffix": "" |
| }, |
| { |
| "first": "Anders", |
| "middle": [], |
| "last": "Hoffmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Enrico", |
| "middle": [], |
| "last": "Giovannini", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michela Nardo, Michaela Saisana, Andrea Saltelli, Ste- fano Tarantola, Anders Hoffmann, and Enrico Giovan- nini. 2008. Handbook on Constructing Composite In- dicators: Methodology and User Guide. OECD Pub- lishing.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "BLEU: A Method for Automatic Evaluation of Machine Translation", |
| "authors": [ |
| { |
| "first": "Kishore", |
| "middle": [], |
| "last": "Papineni", |
| "suffix": "" |
| }, |
| { |
| "first": "Salim", |
| "middle": [], |
| "last": "Roukos", |
| "suffix": "" |
| }, |
| { |
| "first": "Todd", |
| "middle": [], |
| "last": "Ward", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei-Jing", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics (ACL 2002)", |
| "volume": "", |
| "issue": "", |
| "pages": "311--318", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. BLEU: A Method for Automatic Evaluation of Machine Translation. In Proceedings of the 40th Annual Meeting of the Association for Com- putational Linguistics (ACL 2002), pages 311-318, Philadelphia, PA. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "A Study of Translation Edit Rate with Targeted Human Annotation", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Snover", |
| "suffix": "" |
| }, |
| { |
| "first": "Bonnie", |
| "middle": [], |
| "last": "Dorr", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Schwartz", |
| "suffix": "" |
| }, |
| { |
| "first": "Linnea", |
| "middle": [], |
| "last": "Micciulla", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Makhoul", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of Association for Machine Translation in the Americas", |
| "volume": "", |
| "issue": "", |
| "pages": "223--231", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew Snover, Bonnie Dorr, Richard Schwartz, Lin- nea Micciulla, and John Makhoul. 2006. A Study of Translation Edit Rate with Targeted Human An- notation. In Proceedings of Association for Machine Translation in the Americas, pages 223-231.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "TakeLab: Systems for Measuring Semantic Text Similarity", |
| "authors": [ |
| { |
| "first": "Frane", |
| "middle": [], |
| "last": "\u0160ari\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Goran", |
| "middle": [], |
| "last": "Glava\u0161", |
| "suffix": "" |
| }, |
| { |
| "first": "Mladen", |
| "middle": [], |
| "last": "Karan", |
| "suffix": "" |
| }, |
| { |
| "first": "Jan", |
| "middle": [], |
| "last": "\u0160najder", |
| "suffix": "" |
| }, |
| { |
| "first": "Bojana Dalbelo", |
| "middle": [], |
| "last": "Ba\u0161i\u0107", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "First Joint Conference on Lexical and Computational Semantics (*SEM)", |
| "volume": "", |
| "issue": "", |
| "pages": "441--448", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Frane \u0160ari\u0107, Goran Glava\u0161, Mladen Karan, Jan \u0160najder, and Bojana Dalbelo Ba\u0161i\u0107. 2012. TakeLab: Sys- tems for Measuring Semantic Text. In First Joint Conference on Lexical and Computational Semantics (*SEM), pages 441-448, Montr\u00e9al, Canada. Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Data Mining: Practical Machine Learning Tools and Techniques", |
| "authors": [ |
| { |
| "first": "Ian", |
| "middle": [ |
| "H" |
| ], |
| "last": "Witten", |
| "suffix": "" |
| }, |
| { |
| "first": "Eibe", |
| "middle": [], |
| "last": "Frank", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ian H. Witten and Eibe Frank. 2005. Data Mining: Prac- tical Machine Learning Tools and Techniques. Mor- gan Kaufmann, San Francisco, CA, 2 edition.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "num": null, |
| "type_str": "figure", |
| "text": "Similarity Three measures that estimate the similarities over semantic roles (i.e. arguments and adjuncts): SR-O r , SR-M r (\u22c6), and SR-O r (\u22c6). Additionally, two metrics that estimate similarities over discourse representations: DR-O r (\u22c6) and DR-O rp (\u22c6)." |
| }, |
| "TABREF0": { |
| "content": "<table><tr><td/><td/><td/><td colspan=\"3\">similarity distribution</td><td/><td/><td>length</td></tr><tr><td>dataset</td><td colspan=\"5\">instances [0, 1) [1, 2) [2, 3) [3, 4)</td><td colspan=\"3\">[4, 5] mean min max</td></tr><tr><td>dev-[train + test]</td><td/><td/><td/><td/><td/><td/><td/></tr><tr><td>MSRpar</td><td>1,500</td><td>1.20</td><td colspan=\"5\">8.13 17.13 48.73 24.80 17.84</td><td>5</td><td>30</td></tr><tr><td>MSRvid</td><td colspan=\"6\">1,500 31.00 14.13 15.47 20.87 18.53</td><td>6.66</td><td>2</td><td>24</td></tr><tr><td>SMTEuroparl</td><td>1,193</td><td>0.67</td><td>0.42</td><td colspan=\"2\">1.17 12.32</td><td colspan=\"2\">85.4 21.13</td><td>1</td><td>72</td></tr><tr><td>OnWN</td><td>750</td><td>2.13</td><td colspan=\"4\">2.67 10.40 25.47 59.33</td><td>7.57</td><td>1</td><td>34</td></tr><tr><td>SMTnews</td><td>399</td><td>1.00</td><td>0.75</td><td colspan=\"4\">5.51 13.03 79.70 11.72</td><td>2</td><td>28</td></tr><tr><td>test</td><td/><td/><td/><td/><td/><td/><td/></tr><tr><td>headlines</td><td colspan=\"6\">750 15.47 22.00 16.27 24.67 21.60</td><td>7.21</td><td>3</td><td>22</td></tr><tr><td>OnWN</td><td colspan=\"2\">561 36.54</td><td>9.80</td><td colspan=\"3\">7.49 17.11 29.05</td><td>7.17</td><td>5</td><td>22</td></tr><tr><td>FNWN</td><td colspan=\"4\">189 34.39 29.63 28.57</td><td>6.88</td><td colspan=\"2\">0.53 19.90</td><td>3</td><td>71</td></tr><tr><td>SMT</td><td>750</td><td>0.00</td><td>0.27</td><td colspan=\"4\">3.47 20.40 75.87 26.40</td><td>1</td><td>96</td></tr></table>", |
| "text": "Overview of sub-collections in the development and test datasets, including number of instances and distribution of similarity values (in percentage) as well as mean, minimum, and maximum lengths.", |
| "num": null, |
| "html": null, |
| "type_str": "table" |
| }, |
| "TABREF1": { |
| "content": "<table><tr><td>run</td><td/><td colspan=\"2\">parameter def.</td><td/><td/><td colspan=\"2\">feature sel.</td></tr><tr><td/><td>c</td><td>\u03b3</td><td>\u01eb</td><td>corr</td><td>b</td><td>e</td><td>corr</td></tr><tr><td>AE</td><td colspan=\"7\">3.7 0.06 0.3 0.8257 19 14 0.8299</td></tr><tr><td>AED</td><td colspan=\"7\">3.8 0.03 0.2 0.8413 24 19 0.8425</td></tr><tr><td colspan=\"8\">AED T 2.9 0.02 0.3 0.8761 45 33 0.8803</td></tr></table>", |
| "text": "Tuning process: parameter definition and feature selection. Number of features at the beginning and end of the feature selection step included.", |
| "num": null, |
| "html": null, |
| "type_str": "table" |
| }, |
| "TABREF2": { |
| "content": "<table><tr><td>Feature</td><td>AE AED AED T Feature</td><td>AE AED AED T Feature</td><td>AED T</td></tr><tr><td>DP-HWCM c-4</td><td>METEOR-pa</td><td>tklab 7</td><td/></tr><tr><td>DP-HWCM r-4</td><td>METEOR-st</td><td>tklab 8</td><td/></tr><tr><td>DP-Or( * )</td><td>METEOR-sy</td><td>tklab 9</td><td/></tr><tr><td>CP-STM-4</td><td>ESA</td><td>tklab 10</td><td/></tr><tr><td>SR-Or( * )</td><td>dMSRpar</td><td>tklab 11</td><td/></tr><tr><td>SR-Mr( * )</td><td>dSMTeuroparl</td><td>tklab 12</td><td/></tr><tr><td>SR-Or</td><td>dMSRvid</td><td>tklab 13</td><td/></tr><tr><td>DR-Or( * )</td><td>dOnWN</td><td>tklab 14</td><td/></tr><tr><td>DR-Orp( * )</td><td>dSMTnews</td><td>tklab 15</td><td/></tr><tr><td>BLEU</td><td>tklab 1</td><td>tklab 16</td><td/></tr><tr><td>NIST</td><td>tklab 2</td><td>tklab 17</td><td/></tr><tr><td>-TER</td><td>tklab 3</td><td>tklab 18</td><td/></tr><tr><td>-TERp-A</td><td>tklab 4</td><td>tklab 19</td><td/></tr><tr><td>ROUGE-W</td><td>tklab 5</td><td>tklab 20</td><td/></tr><tr><td>METEOR-ex</td><td>tklab 6</td><td>tklab 21</td><td/></tr></table>", |
| "text": "Features considered at the beginning of each run, represented as empty squares ( ). Filled squares ( ) represent features considered relevant after feature selection.", |
| "num": null, |
| "html": null, |
| "type_str": "table" |
| }, |
| "TABREF3": { |
| "content": "<table><tr><td>run</td><td colspan=\"3\">headlines OnWN FNWN</td><td>SMT</td><td>mean</td></tr><tr><td>AE (65)</td><td>0.6092</td><td>0.5679</td><td colspan=\"2\">-0.1268 0.2090 0.4037</td></tr><tr><td colspan=\"2\">AED (83) 0.4136</td><td>0.4770</td><td colspan=\"2\">-0.0852 0.1662 0.3050</td></tr><tr><td colspan=\"2\">AED T (72) 0.5119</td><td>0.6386</td><td colspan=\"2\">-0.0464 0.1235 0.3671</td></tr></table>", |
| "text": "Official results for the three runs (rank included).", |
| "num": null, |
| "html": null, |
| "type_str": "table" |
| }, |
| "TABREF4": { |
| "content": "<table><tr><td>run</td><td colspan=\"3\">headlines OnWN FNWN</td><td>SMT</td><td>mean</td></tr><tr><td>AE (a)</td><td>0.6210</td><td>0.5905</td><td colspan=\"3\">-0.0987 0.2990 0.4456</td></tr><tr><td>AE (b)</td><td>0.6072</td><td>0.4767</td><td colspan=\"3\">-0.0113 0.3236 0.4282</td></tr><tr><td>AE (c)</td><td>0.6590</td><td>0.6973</td><td>0.1547</td><td colspan=\"2\">0.3429 0.5208</td></tr></table>", |
| "text": "Post-competition experiments results", |
| "num": null, |
| "html": null, |
| "type_str": "table" |
| } |
| } |
| } |
| } |