| { |
| "paper_id": "S13-1032", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:42:12.905309Z" |
| }, |
| "title": "CPN-CORE: A Text Semantic Similarity System Infused with Opinion Knowledge", |
| "authors": [ |
| { |
| "first": "Carmen", |
| "middle": [], |
| "last": "Banea", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Cornell University Ithaca", |
| "location": { |
| "region": "NY" |
| } |
| }, |
| "email": "carmen.banea@gmail.com" |
| }, |
| { |
| "first": "Yoonjung", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Cornell University Ithaca", |
| "location": { |
| "region": "NY" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Lingjia", |
| "middle": [], |
| "last": "Deng", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Cornell University Ithaca", |
| "location": { |
| "region": "NY" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Samer", |
| "middle": [], |
| "last": "Hassan", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Cornell University Ithaca", |
| "location": { |
| "region": "NY" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Mohler", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Cornell University Ithaca", |
| "location": { |
| "region": "NY" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Bishan", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Cornell University Ithaca", |
| "location": { |
| "region": "NY" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Claire", |
| "middle": [], |
| "last": "Cardie", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Cornell University Ithaca", |
| "location": { |
| "region": "NY" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Rada", |
| "middle": [], |
| "last": "Mihalcea", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Cornell University Ithaca", |
| "location": { |
| "region": "NY" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Janyce", |
| "middle": [], |
| "last": "Wiebe", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Cornell University Ithaca", |
| "location": { |
| "region": "NY" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "This article provides a detailed overview of the CPN text-to-text similarity system that we participated with in the Semantic Textual Similarity task evaluations hosted at *SEM 2013. In addition to more traditional components, such as knowledge-based and corpus-based metrics leveraged in a machine learning framework, we also use opinion analysis features to achieve a stronger semantic representation of textual units. While the evaluation datasets are not designed to test the similarity of opinions, as a component of textual similarity, nonetheless, our system variations ranked number 38, 39 and 45 among the 88 participating systems.", |
| "pdf_parse": { |
| "paper_id": "S13-1032", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "This article provides a detailed overview of the CPN text-to-text similarity system that we participated with in the Semantic Textual Similarity task evaluations hosted at *SEM 2013. In addition to more traditional components, such as knowledge-based and corpus-based metrics leveraged in a machine learning framework, we also use opinion analysis features to achieve a stronger semantic representation of textual units. While the evaluation datasets are not designed to test the similarity of opinions, as a component of textual similarity, nonetheless, our system variations ranked number 38, 39 and 45 among the 88 participating systems.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Measures of text similarity have been used for a long time in applications in natural language processing and related areas. One of the earliest applications of text similarity is perhaps the vector-space model used in information retrieval, where the document most relevant to an input query is determined by ranking documents in a collection in reversed order of their angular distance with the given query (Salton and Lesk, 1971) . Text similarity has also been used for relevance feedback and text classification (Rocchio, 1971) , word sense disambiguation (Lesk, 1986; Schutze, 1998) , and extractive summarization , in the automatic evaluation of machine translation (Papineni et al., 2002) , text summarization (Lin and Hovy, 2003) , text coherence (Lapata and Barzilay, 2005) and in plagiarism detection (Nawab et al., 2011) . Earlier work on this task has primarily focused on simple lexical matching methods, which produce a similarity score based on the number of lexical units that occur in both input segments. Improvements to this simple method have considered stemming, stopword removal, part-of-speech tagging, longest subsequence matching, as well as various weighting and normalization factors . While successful to a certain degree, these lexical similarity methods cannot always identify the semantic similarity of texts. For instance, there is an obvious similarity between the text segments \"she owns a dog\" and \"she has an animal,\" yet these methods will mostly fail to identify it.", |
| "cite_spans": [ |
| { |
| "start": 409, |
| "end": 432, |
| "text": "(Salton and Lesk, 1971)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 517, |
| "end": 532, |
| "text": "(Rocchio, 1971)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 561, |
| "end": 573, |
| "text": "(Lesk, 1986;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 574, |
| "end": 588, |
| "text": "Schutze, 1998)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 673, |
| "end": 696, |
| "text": "(Papineni et al., 2002)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 718, |
| "end": 738, |
| "text": "(Lin and Hovy, 2003)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 756, |
| "end": 783, |
| "text": "(Lapata and Barzilay, 2005)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 812, |
| "end": 832, |
| "text": "(Nawab et al., 2011)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "More recently, researchers have started to consider the possibility of combining the large number of word-to-word semantic similarity measures (e.g., (Jiang and Conrath, 1997; Leacock and Chodorow, 1998; Lin, 1998; Resnik, 1995) ) within a semantic similarity method that works for entire texts. The methods proposed to date in this direction mainly consist of either bipartite-graph matching strategies that aggregate word-to-word similarity into a text similarity score (Mihalcea et al., 2006; Islam and Inkpen, 2009; Hassan and Mihalcea, 2011; Mohler et al., 2011) , or data-driven methods that perform component-wise additions of semantic vector representations as obtained with corpus measures such as latent semantic analysis (Landauer et al., 1997) , explicit semantic analysis (Gabrilovich and Markovitch, 2007) , or salient semantic analysis (Hassan and Mihalcea, 2011) .", |
| "cite_spans": [ |
| { |
| "start": 150, |
| "end": 175, |
| "text": "(Jiang and Conrath, 1997;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 176, |
| "end": 203, |
| "text": "Leacock and Chodorow, 1998;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 204, |
| "end": 214, |
| "text": "Lin, 1998;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 215, |
| "end": 228, |
| "text": "Resnik, 1995)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 472, |
| "end": 495, |
| "text": "(Mihalcea et al., 2006;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 496, |
| "end": 519, |
| "text": "Islam and Inkpen, 2009;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 520, |
| "end": 546, |
| "text": "Hassan and Mihalcea, 2011;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 547, |
| "end": 567, |
| "text": "Mohler et al., 2011)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 732, |
| "end": 755, |
| "text": "(Landauer et al., 1997)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 785, |
| "end": 819, |
| "text": "(Gabrilovich and Markovitch, 2007)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 851, |
| "end": 878, |
| "text": "(Hassan and Mihalcea, 2011)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we describe the system variations with which we participated in the *SEM 2013 task on semantic textual similarity (Agirre et al., 2013) . The system builds upon our earlier work on corpusbased and knowledge-based methods of text semantic similarity (Mihalcea et al., 2006; Hassan and Mihalcea, 2011; Mohler et al., 2011; Banea et al., 2012) , while also incorporating opinion aware features. Our observation is that text is not only similar on a semantic level, but also with respect to opinions. Let us consider the following text segments: \"she owns a dog\" and \"I believe she owns a dog.\" The question then becomes how similar these text fragments truly are. Current systems will consider the two sentences semantically equivalent, yet to a human, they are not. A belief is not equivalent to a fact (and for the case in point, the person may very well have a cat or some other pet), and this should consequently lower the relatedness score. For this reason, we advocate that STS systems should also consider the opinions expressed and their equivalence. While the *SEM STS task is not formulated to evaluate this type of similarity, we complement more traditional corpus and knowledge-based methods with opinion aware features, and use them in a meta-learning framework in an arguably first attempt at incorporating this type of information to infer text-to-text similarity.", |
| "cite_spans": [ |
| { |
| "start": 129, |
| "end": 150, |
| "text": "(Agirre et al., 2013)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 264, |
| "end": 287, |
| "text": "(Mihalcea et al., 2006;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 288, |
| "end": 314, |
| "text": "Hassan and Mihalcea, 2011;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 315, |
| "end": 335, |
| "text": "Mohler et al., 2011;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 336, |
| "end": 355, |
| "text": "Banea et al., 2012)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Over the past years, the research community has focused on computing semantic relatedness using methods that are either knowledge-based or corpusbased. Knowledge-based methods derive a measure of relatedness by utilizing lexical resources and ontologies such as WordNet (Miller, 1995) to measure definitional overlap, term distance within a graphical taxonomy, or term depth in the taxonomy as a measure of specificity. We explore several of these measures in depth in Section 3.3.1. On the other side, corpus-based measures such as Latent Semantic Analysis (LSA) (Landauer et al., 1997) , Explicit Semantic Analysis (ESA) (Gabrilovich and Markovitch, 2007) , Salient Semantic Analysis (SSA) (Hassan and Mihalcea, 2011) , Pointwise Mutual Information (PMI) (Church and Hanks, 1990) , PMI-IR (Turney, 2001) , Second Order PMI (Islam and Inkpen, 2006) , Hyperspace Analogues to Language (Burgess et al., 1998) and distributional similarity (Lin, 1998) employ probabilistic approaches to decode the semantics of words. They consist of unsupervised methods that utilize the contextual information and patterns observed in raw text to build semantic profiles of words. Unlike knowledgebased methods, which suffer from limited coverage, corpus-based measures are able to induce a similarity between any given two words, as long as they appear in the very large corpus used as training.", |
| "cite_spans": [ |
| { |
| "start": 270, |
| "end": 284, |
| "text": "(Miller, 1995)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 564, |
| "end": 587, |
| "text": "(Landauer et al., 1997)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 623, |
| "end": 657, |
| "text": "(Gabrilovich and Markovitch, 2007)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 692, |
| "end": 719, |
| "text": "(Hassan and Mihalcea, 2011)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 757, |
| "end": 781, |
| "text": "(Church and Hanks, 1990)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 791, |
| "end": 805, |
| "text": "(Turney, 2001)", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 825, |
| "end": 849, |
| "text": "(Islam and Inkpen, 2006)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 885, |
| "end": 907, |
| "text": "(Burgess et al., 1998)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 938, |
| "end": 949, |
| "text": "(Lin, 1998)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The STS task consists of labeling one sentence pair at a time, based on the semantic similarity existent between its two component sentences. Human assigned similarity scores range from 0 (no relation) to 5 (semantically equivalent). The *SEM 2013 STS task did not provide additional labeled data to the training and testing sets released as part of the STS task hosted at SEMEVAL 2012 (Agirre et al., 2012) ; our system variations were trained on SEMEVAL 2012 data.", |
| "cite_spans": [ |
| { |
| "start": 386, |
| "end": 407, |
| "text": "(Agirre et al., 2012)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task Setup", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The test sets (Agirre et al., 2013) consist of text pairs extracted from headlines (headlines, 750 pairs), sense definitions from WordNet and OntoNotes (OnWN, 561 pairs), sense definitions from WordNet and FrameNet (FNWN, 189 pairs), and data used in the evaluation of machine translation systems (SMT, 750 pairs).", |
| "cite_spans": [ |
| { |
| "start": 14, |
| "end": 35, |
| "text": "(Agirre et al., 2013)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task Setup", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Various subparts of our framework use several resources that are described in more detail below.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Resources", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Wikipedia 1 is the most comprehensive encyclopedia to date, and it is an open collaborative effort hosted on-line. Its basic entry is an article which in addition to describing an entity or an event also contains hyperlinks to other pages within or outside of Wikipedia. This structure (articles and hyperlinks) is directly exploited by semantic similarity methods such as ESA (Gabrilovich and Markovitch, 2007) , or SSA (Hassan and Mihalcea, 2011) 2 .", |
| "cite_spans": [ |
| { |
| "start": 377, |
| "end": 411, |
| "text": "(Gabrilovich and Markovitch, 2007)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Resources", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "WordNet (Miller, 1995) is a manually crafted lexical resource that maintains semantic relationships such as synonymy, antonymy, hypernymy, etc., between basic units of meaning, or synsets. These relationships are employed by various knowledge-based methods to derive semantic similarity.", |
| "cite_spans": [ |
| { |
| "start": 8, |
| "end": 22, |
| "text": "(Miller, 1995)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Resources", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The MPQA corpus ) is a newswire data set that was manually annotated at the expression level for opinion-related content. Some of the features derived by our opinion extraction models were based on training on this corpus.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Resources", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Our system variations derive the similarity score of a given sentence-pair by integrating information from knowledge, corpus, and opinion-based sources 3 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Following prior work from our group (Mihalcea et al., 2006; Mohler and Mihalcea, 2009) , we employ several WordNet-based similarity metrics for the task of sentence-level similarity. Briefly, for each open-class word in one of the input texts, we compute the maximum semantic similarity 4 that can be obtained by pairing it with any open-class word in the other input text. All the word-to-word similarity scores obtained in this way are summed and normalized to the length of the two input texts. We provide below a short description for each of the similarity metrics employed by this system. The shortest path (P ath) similarity is equal to:", |
| "cite_spans": [ |
| { |
| "start": 36, |
| "end": 59, |
| "text": "(Mihalcea et al., 2006;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 60, |
| "end": 86, |
| "text": "Mohler and Mihalcea, 2009)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Knowledge-Based Features", |
| "sec_num": "3.3.1" |
| }, |
| { |
| "text": "Sim path = 1 length (1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Knowledge-Based Features", |
| "sec_num": "3.3.1" |
| }, |
| { |
| "text": "where length is the length of the shortest path between two concepts using node-counting. The Leacock & Chodorow (Leacock and Chodorow, 1998 ) (LCH) metric is equal to:", |
| "cite_spans": [ |
| { |
| "start": 113, |
| "end": 140, |
| "text": "(Leacock and Chodorow, 1998", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Knowledge-Based Features", |
| "sec_num": "3.3.1" |
| }, |
| { |
| "text": "Sim lch = \u2212 log length 2 * D (2)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Knowledge-Based Features", |
| "sec_num": "3.3.1" |
| }, |
| { |
| "text": "where length is the length of the shortest path between two concepts using node-counting, and D is the maximum depth of the taxonomy.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Knowledge-Based Features", |
| "sec_num": "3.3.1" |
| }, |
| { |
| "text": "The Lesk (Lesk) similarity of two concepts is defined as a function of the overlap between the corresponding definitions, as provided by a dictionary.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Knowledge-Based Features", |
| "sec_num": "3.3.1" |
| }, |
| { |
| "text": "It is based on an algorithm proposed by Lesk (1986) as a solution for word sense disambiguation. The Wu & Palmer (Wu and Palmer, 1994 ) (W U P ) similarity metric measures the depth of two given concepts in the WordNet taxonomy, and the depth of the least common subsumer (LCS), and combines these figures into a similarity score:", |
| "cite_spans": [ |
| { |
| "start": 40, |
| "end": 51, |
| "text": "Lesk (1986)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 113, |
| "end": 133, |
| "text": "(Wu and Palmer, 1994", |
| "ref_id": "BIBREF40" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Knowledge-Based Features", |
| "sec_num": "3.3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "Simwup = 2 * depth(LCS) depth(concept1) + depth(concept2)", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Knowledge-Based Features", |
| "sec_num": "3.3.1" |
| }, |
| { |
| "text": "The measure introduced by Resnik (Resnik, 1995) (RES) returns the information content (IC) of the LCS of two concepts:", |
| "cite_spans": [ |
| { |
| "start": 33, |
| "end": 47, |
| "text": "(Resnik, 1995)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Knowledge-Based Features", |
| "sec_num": "3.3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "Simres = IC(LCS)", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "Knowledge-Based Features", |
| "sec_num": "3.3.1" |
| }, |
| { |
| "text": "where IC is defined as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Knowledge-Based Features", |
| "sec_num": "3.3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "IC(c) = \u2212 log P (c)", |
| "eq_num": "(5)" |
| } |
| ], |
| "section": "Knowledge-Based Features", |
| "sec_num": "3.3.1" |
| }, |
| { |
| "text": "and P (c) is the probability of encountering an instance of concept c in a large corpus. The measure introduced by Lin (Lin, 1998) (Lin) builds on Resnik's measure of similarity, and adds a normalization factor consisting of the information content of the two input concepts:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Knowledge-Based Features", |
| "sec_num": "3.3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "Sim lin = 2 * IC(LCS) IC(concept1) + IC(concept2)", |
| "eq_num": "(6)" |
| } |
| ], |
| "section": "Knowledge-Based Features", |
| "sec_num": "3.3.1" |
| }, |
| { |
| "text": "We also consider the Jiang & Conrath (Jiang and Conrath, 1997) (JCN ) measure of similarity:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Knowledge-Based Features", |
| "sec_num": "3.3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "Simjnc = 1 IC(concept1) + IC(concept2) \u2212 2 * IC(LCS)", |
| "eq_num": "(7)" |
| } |
| ], |
| "section": "Knowledge-Based Features", |
| "sec_num": "3.3.1" |
| }, |
| { |
| "text": "While most of the corpus-based methods induce semantic profiles in a word-space, where the semantic profile of a word is expressed in terms of its cooccurrence with other words, LSA, ESA and SSA rely on a concept-space representation, thus expressing a word's semantic profile in terms of the implicit (LSA), explicit (ESA), or salient (SSA) concepts. This departure from the sparse word-space to a denser, richer, and unambiguous concept-space resolves one of the fundamental problems in semantic relatedness, namely the vocabulary mismatch.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Corpus Based Features", |
| "sec_num": "3.3.2" |
| }, |
| { |
| "text": "Latent Semantic Analysis (LSA) (Landauer et al., 1997) . In LSA, term-context associations are captured by means of a dimensionality reduction operated by a singular value decomposition (SVD) on the term-by-context matrix T, where the matrix is induced from a large corpus. This reduction entails the abstraction of meaning by collapsing similar contexts and discounting noisy and irrelevant ones, hence transforming the real world term-context space into a word-latent-concept space which achieves a much deeper and concrete semantic representation of words 5 .", |
| "cite_spans": [ |
| { |
| "start": 31, |
| "end": 54, |
| "text": "(Landauer et al., 1997)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Corpus Based Features", |
| "sec_num": "3.3.2" |
| }, |
| { |
| "text": "Random Projection (RP ) (Dasgupta, 1999) . In RP, a high dimensional space is projected onto a lower dimensional one, using a randomly generated matrix. (Bingham and Mannila, 2001) show that unlike LSA or principal component analysis (PCA), RP is computationally efficient for large corpora, while also retaining accurate vector similarity and yielding comparable results.", |
| "cite_spans": [ |
| { |
| "start": 24, |
| "end": 40, |
| "text": "(Dasgupta, 1999)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 153, |
| "end": 180, |
| "text": "(Bingham and Mannila, 2001)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Corpus Based Features", |
| "sec_num": "3.3.2" |
| }, |
| { |
| "text": "Explicit Semantic Analysis (ESA) (Gabrilovich and Markovitch, 2007) . ESA uses encyclopedic knowledge in an information retrieval framework to generate a semantic interpretation of words. It relies on the distribution of words inside Wikipedia articles, thus building a semantic representation for a given word using a word-document association.", |
| "cite_spans": [ |
| { |
| "start": 33, |
| "end": 67, |
| "text": "(Gabrilovich and Markovitch, 2007)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Corpus Based Features", |
| "sec_num": "3.3.2" |
| }, |
| { |
| "text": "Salient Semantic Analysis (SSA) (Hassan and Mihalcea, 2011) . SSA incorporates a similar semantic abstraction as ESA, yet it uses salient concepts gathered from encyclopedic knowledge, where a \"concept\" represents an unambiguous expression which affords an encyclopedic definition. Saliency in this case is determined based on the word being hyperlinked in context, implying that it is highly relevant to the given text.", |
| "cite_spans": [ |
| { |
| "start": 32, |
| "end": 59, |
| "text": "(Hassan and Mihalcea, 2011)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Corpus Based Features", |
| "sec_num": "3.3.2" |
| }, |
| { |
| "text": "In order to determine the similarity of two text fragments, we employ two variations: the typical cosine similarity (cos) and a best alignment strategy (align), which we explain in more detail in the paragraph below. Both variations were paired with the ESA, and SSA systems resulting in four similarity scores that were used as features by our meta-system, namely ESA cos , ESA align , SSA cos , and SSA align ; in addition, we also used BOW cos , LSA cos , and RP cos .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Corpus Based Features", |
| "sec_num": "3.3.2" |
| }, |
| { |
| "text": "Best Alignment Strategy (align). Let T a and T b be two text fragments of size a and b respectively. After removing all stopwords, we first determine the num- 5 We use the LSA implementation available at code. google.com/p/semanticvectors/. ber of shared terms (\u03c9) between T a and T b . Second, we calculate the semantic relatedness of all possible pairings between non-shared terms in T a and T b . We further filter these possible combinations by creating a list \u03d5 which holds the strongest semantic pairings between the fragments' terms, such that each term can only belong to one and only one pair.", |
| "cite_spans": [ |
| { |
| "start": 159, |
| "end": 160, |
| "text": "5", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Corpus Based Features", |
| "sec_num": "3.3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "Sim(Ta, T b ) = (\u03c9 + |\u03d5| i=1 \u03d5i) \u00d7 (2ab) a + b", |
| "eq_num": "(8)" |
| } |
| ], |
| "section": "Corpus Based Features", |
| "sec_num": "3.3.2" |
| }, |
| { |
| "text": "where \u03d5 i is the similarity score for the ith pairing.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Corpus Based Features", |
| "sec_num": "3.3.2" |
| }, |
| { |
| "text": "We design opinion-aware features to capture sentence similarity on the subjectivity level based on the output of three subjectivity analysis systems. Intuitively, two sentences are similar in terms of subjectivity if there exists similar opinion expressions which also share similar opinion holders.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Opinion Aware Features", |
| "sec_num": "3.3.3" |
| }, |
| { |
| "text": "OpinionFinder ) is a publicly available opinion extraction model that annotates the subjectivity of new text based on the presence (or absence) of words or phrases in a large lexicon. The system consists of a two step process, by feeding the sentences identified as subjective or objective by a rule-based high-precision classifier to a highrecall classifier that iteratively learns from the remaining corpus. For each sentence in a STS pair, the two classifiers provide two predictions; a subjectivity similarity score (SUBJSL) is computed as follows. If both sentences are classified as subjective or objective, the score is 1; if one is subjective and the other one is objective, the score is -1; otherwise it is 0. We also make use of the output of the subjective expression identifier in OpinionFinder. We first record how many expressions the two sentences have: feature NUMEX1 and NUMEX2. Then we compare how many tokens these expressions share and we normalize by the total number of expressions (feature EXPR).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Opinion Aware Features", |
| "sec_num": "3.3.3" |
| }, |
| { |
| "text": "We compute the difference between the probabilities of the two sentences being subjective (SUBJD-IFF), by employing a logistic regression classifier using LIBLINEAR (Fan et al., 2008) trained on the MPQA corpus. The smaller the difference, the more similar the sentences are in terms of subjectivity.", |
| "cite_spans": [ |
| { |
| "start": 165, |
| "end": 183, |
| "text": "(Fan et al., 2008)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Opinion Aware Features", |
| "sec_num": "3.3.3" |
| }, |
| { |
| "text": "We also employ features produced by the opinionextraction model of Yang and Cardie (Yang and Cardie, 2012) , which is better suited to process ex-pressions of arbitrary length. Specifically, for each sentence, we extract subjective expressions and generate the following features. SUBJCNT is a binary feature which is equal to 1 if both sentences contain a subjective expression. DSEALGN marks the number of shared words between subjective expressions in two sentences, while DSESIM represents their similarity beyond the word level. We represent the subjective expressions in each sentence as a feature vector, containing unigrams extracted from the expressions, their part-of-speech, their WordNet hypernyms and their subjectivity label 6 , and compute the cosine similarity between the feature vectors. The holder of the opinion expressions is extracted with the aid of a dependency parser 7 . In most cases, the opinion holder and the opinion expression are related by the dependency relation subj. This relation is used to expand the verb dependents in the opinion expression and identify the opinion holder or AGENT.", |
| "cite_spans": [ |
| { |
| "start": 83, |
| "end": 106, |
| "text": "(Yang and Cardie, 2012)", |
| "ref_id": "BIBREF41" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Opinion Aware Features", |
| "sec_num": "3.3.3" |
| }, |
| { |
| "text": "Each metric described above provides one individual score for every sentence-pair in both the training and test set. These scores then serve as input to a meta-learner, which adjusts their importance, and thus their bearing on the overall similarity score predicted by the system. We experimented with regression and decision tree based algorithms by performing 10-fold cross validation on the 2012 training data; these types of learners are particularly well suited to maintain the ordinality of the semantic similarity scores (i.e. a score of 4.5 is closer to either 4 or 5, implying that the two sentences are mostly or fully equivalent, while also being far further away from 0, implying no semantic relatedness between the two sentences). We obtained consistent results when using support vector regression with polynomial kernel (Drucker et al., 1997; Smola and Schoelkopf, 1998 ) (SV R) and random subspace meta-classification with tree learners (Ho, 1998) ", |
| "cite_spans": [ |
| { |
| "start": 835, |
| "end": 857, |
| "text": "(Drucker et al., 1997;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 858, |
| "end": 884, |
| "text": "Smola and Schoelkopf, 1998", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 953, |
| "end": 963, |
| "text": "(Ho, 1998)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Meta-learning", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "(RandSubspace) 8 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Meta-learning", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "We submitted three system variations based on the training corpus (first word in the sys- 6 Label is based on the OpinionFinder subjectivity lexicon (Wiebe et al., 2005 ).", |
| "cite_spans": [ |
| { |
| "start": 90, |
| "end": 91, |
| "text": "6", |
| "ref_id": null |
| }, |
| { |
| "start": 149, |
| "end": 168, |
| "text": "(Wiebe et al., 2005", |
| "ref_id": "BIBREF38" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Meta-learning", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "7 nlp.stanford.edu/software/ 8 Included with the Weka framework (Hall et al., 2009) ; we used the default values for both algorithms. Table 2 lists the correlations obtained between the scores assigned by each one of the features we used and the scores assigned by the human judges. It is interesting to note that overall, corpusbased measures are stronger performers compared to knowledge-based measures. The top contenders in the former group are ESA align , SSA align , LSA cos , and RP cos , indicating that these methods are able to leverage a significant amount of semantic information from text. While LSA cos achieves high correlations on many of the datasets, replacing the singular value decomposition operation by random projection to a lower-dimension space (RP ) achieves competitive results while also being computationally efficient. This observation is in line with prior literature (Bingham and Mannila, 2001 ). Among the knowledge-based methods, JCN and P ath achieve high performance on more than five of the datasets. In some cases, particularly on the 2013 test data, the shortest path method (P ath) peforms better or on par with the performance attained by other knowledge-based measures, despite its computational simplicity. While opinion-based measures do not exhibit the same high correlation, we should remember that none of the datasets displays consistent opinion content, nor were they annotated with this aspect in mind, in order for this information to be properly leveraged and evaluated. Nonetheless, we notice several promising features, such as DSEALIGN and EXP R. Lower correlations seem to be associated with shorter spans of text, since when averaging all opinion-based correlations per dataset, M SRvid (x2), OnW N (x2), and headlines display the lowest average correlation, ranging from 0 to 0.03. 
This matches the expectation that opinionated content can be easier identified in longer contexts, as additional subjective elements amount to a stronger prediction. The other seven datasets consist of longer spans of text; they display an average opinion-based correlation between 0.07 and 0.12, with the exception of F N W N and SM T news at 0.04 and 0.01, respectively. Our systems performed well, ranking 38, 39 and 45 among the 88 competing systems in *SEM 2013 (see Table 1 ), with the best being comb.SVR and comb.RandSubspace, both with a mean correlation of 0.494. We noticed from our participation in SEMEVAL 2012 (Banea et al., 2012) , that training and testing on the same type of data achieves the best results; this receives further support when considering the performance of the indv.RandSubspace variation on the OnWN data 10 , which exhibits a #0208798 and #0916046. This work was supported in part by DARPA-BAA-12-47 DEFT grant #12475008. Any opinions, findings, and conclusions or recommendations expressed in this material are those of the authors and do not necessarily reflect the views of the National Science Foundation or the Defense Advanced Research Projects Agency.", |
| "cite_spans": [ |
| { |
| "start": 64, |
| "end": 83, |
| "text": "(Hall et al., 2009)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 899, |
| "end": 925, |
| "text": "(Bingham and Mannila, 2001", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 2464, |
| "end": 2484, |
| "text": "(Banea et al., 2012)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 134, |
| "end": 141, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| }, |
| { |
| "start": 2312, |
| "end": 2319, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Meta-learning", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "www.wikipedia.org 2 In the experiments reported in this paper, all the corpusbased methods are trained on the English Wikipedia download from October 2008.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "The abbreviation in italics accompanying each method allows for cross-referencing with the results listed inTable 2.4 We use the WordNet::Similarity package(Pedersen et al., 2004).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "The SM T training set is a combination of SM T europarl (in this paper abbreviated as SM T ep) and SM T news data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "The SM T test data is not part of the same corpus as either 0.034 correlation increase over our next best system (comb.RandSubspace). While we do surpass the bag-of-words cosine baseline (baseline-tokencos) computed by the task organizers by a 0.13 difference in correlation, we fall short by 0.124 from the performance of the best system in the STS task.5 ConclusionsTo participate in the STS *SEM 2013 task, we constructed a meta-learner framework that combines traditional knowledge and corpus-based methods, while also introducing novel opinion analysis based metrics. While the *SEM data is not particularly suited for evaluating the performance of opinion features, this is nonetheless a first step toward conducting text similarity research while also considering the subjective dimension of text. Our system variations ranked 38, 39 and 45 among the 88 participating systems.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This material is based in part upon work supported by the National Science Foundation CA-REER award #0747340 and IIS awards #1018613, SM T ep or SM T news.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Semeval-2012 task 6: A pilot on semantic textual similarity", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Agirre", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Cer", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Diab", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Gonzalez", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "conjunction with the First Joint Conference on Lexical and Computational Semantics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "E. Agirre, D. Cer, M. Diab, and A. Gonzalez. 2012. Semeval-2012 task 6: A pilot on semantic textual sim- ilarity. In Proceedings of the 6th International Work- shop on Semantic Evaluation (SemEval 2012), in con- junction with the First Joint Conference on Lexical and Computational Semantics (*SEM 2012).", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "*SEM 2013 Shared Task: Semantic Textual Similarity, including a Pilot on Typed-Similarity", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Agirre", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Cer", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Diab", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Gonzalez-Agirre", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the Second Joint Conference on Lexical and Computational Semantics (*SEM 2013)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "E. Agirre, D. Cer, M. Diab, A. Gonzalez-Agirre, and W. Guo. 2013. *SEM 2013 Shared Task: Semantic Tex- tual Similarity, including a Pilot on Typed-Similarity. In Proceedings of the Second Joint Conference on Lex- ical and Computational Semantics (*SEM 2013), At- lanta, GA, USA.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "UNT: A supervised synergistic approach to semantic text similarity", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Banea", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Hassan", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Mohler", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mihalcea", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the First Joint Conference on Lexical and Computational Semantics (*SEM 2012)", |
| "volume": "", |
| "issue": "", |
| "pages": "635--642", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Banea, S. Hassan, M. Mohler, and R. Mihalcea. 2012. UNT: A supervised synergistic approach to seman- tic text similarity. In Proceedings of the First Joint Conference on Lexical and Computational Semantics (*SEM 2012), pages 635-642, Montreal, Canada.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Random projection in dimensionality reduction: applications to image and text data", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Bingham", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Mannila", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Proceedings of the seventh ACM SIGKDD international conference on Knowledge discovery and data mining (KDD 2001)", |
| "volume": "", |
| "issue": "", |
| "pages": "245--250", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "E. Bingham and H. Mannila. 2001. Random projection in dimensionality reduction: applications to image and text data. In Proceedings of the seventh ACM SIGKDD international conference on Knowledge discovery and data mining (KDD 2001), pages 245-250, San Fran- cisco, CA, USA.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Explorations in context space: words, sentences, discourse. Discourse Processes", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Burgess", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Livesay", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Lund", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "", |
| "volume": "25", |
| "issue": "", |
| "pages": "211--257", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Burgess, K. Livesay, and K. Lund. 1998. Explorations in context space: words, sentences, discourse. Dis- course Processes, 25(2):211-257.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Word association norms, mutual information, and lexicography", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Church", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Hanks", |
| "suffix": "" |
| } |
| ], |
| "year": 1990, |
| "venue": "Computational Linguistics", |
| "volume": "16", |
| "issue": "1", |
| "pages": "22--29", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "K. Church and P. Hanks. 1990. Word association norms, mutual information, and lexicography. Computational Linguistics, 16(1):22-29.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Learning mixtures of Gaussians", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Dasgupta", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "40th Annual Symposium on Foundations of Computer Science (FOCS 1999)", |
| "volume": "", |
| "issue": "", |
| "pages": "634--644", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "S. Dasgupta. 1999. Learning mixtures of Gaussians. In 40th Annual Symposium on Foundations of Computer Science (FOCS 1999), pages 634-644, New York, NY, USA.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Support vector regression machines", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Drucker", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "J" |
| ], |
| "last": "Burges", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Kaufman", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Smola", |
| "suffix": "" |
| }, |
| { |
| "first": "Vladimir", |
| "middle": [], |
| "last": "Vapnik", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "9", |
| "issue": "", |
| "pages": "155--161", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "H. Drucker, C. J. Burges, L. Kaufman, A. Smola, and Vladimir Vapnik. 1997. Support vector regression machines. Advances in Neural Information Process- ing Systems, 9:155-161.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Liblinear: A library for large linear classification", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Fan", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Hsieh", |
| "suffix": "" |
| }, |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "The Journal of Machine Learning Research", |
| "volume": "9", |
| "issue": "", |
| "pages": "1871--1874", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "R. Fan, K. Chang, C. Hsieh, X. Wang, and C. Lin. 2008. Liblinear: A library for large linear classification. The Journal of Machine Learning Research, 9:1871-1874.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Computing semantic relatedness using Wikipedia-based explicit semantic analysis", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Gabrilovich", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Markovitch", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the 20th AAAI International Conference on Artificial Intelligence (AAAI'07)", |
| "volume": "", |
| "issue": "", |
| "pages": "1606--1611", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "E. Gabrilovich and S. Markovitch. 2007. Comput- ing semantic relatedness using Wikipedia-based ex- plicit semantic analysis. In Proceedings of the 20th AAAI International Conference on Artificial Intelli- gence (AAAI'07), pages 1606-1611, Hyderabad, In- dia.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "The WEKA data mining software: An update. SIGKDD Explorations", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Hall", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Frank", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Holmes", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Pfahringer", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Reutemann", |
| "suffix": "" |
| }, |
| { |
| "first": "Ian", |
| "middle": [ |
| "H" |
| ], |
| "last": "Witten", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "M. Hall, E. Frank, G. Holmes, B. Pfahringer, P. Reute- mann, and Ian H. Witten. 2009. The WEKA data mining software: An update. SIGKDD Explorations, 11(1).", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Measuring semantic relatedness using salient encyclopedic concepts. Artificial Intelligence", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Hassan", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Mihalcea", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "S. Hassan and R. Mihalcea. 2011. Measuring semantic relatedness using salient encyclopedic concepts. Arti- ficial Intelligence, Special Issue.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "The Random Subspace Method for Constructing Decision Forests", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Ho", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", |
| "volume": "20", |
| "issue": "8", |
| "pages": "832--844", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "K. Ho. 1998. The Random Subspace Method for Constructing Decision Forests. IEEE Transactions on Pattern Analysis and Machine Intelligence, 20(8):832- 844.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Second order cooccurrence PMI for determining the semantic similarity of words", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Islam", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Inkpen", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the 5th Conference on Language Resources and Evaluation (LREC 06)", |
| "volume": "", |
| "issue": "", |
| "pages": "1033--1038", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "A. Islam and D. Inkpen. 2006. Second order co- occurrence PMI for determining the semantic similar- ity of words. In Proceedings of the 5th Conference on Language Resources and Evaluation (LREC 06), vol- ume 2, pages 1033-1038, Genoa, Italy, July.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Semantic Similarity of Short Texts", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Islam", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Inkpen", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Nicolas Nicolov, Galia Angelova, and Ruslan Mitkov", |
| "volume": "309", |
| "issue": "", |
| "pages": "227--236", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "A. Islam and D. Inkpen. 2009. Semantic Similarity of Short Texts. In Nicolas Nicolov, Galia Angelova, and Ruslan Mitkov, editors, Recent Advances in Natural Language Processing V, volume 309 of Current Issues in Linguistic Theory, pages 227-236. John Benjamins, Amsterdam & Philadelphia.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Semantic similarity based on corpus statistics and lexical taxonomy", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [ |
| "J" |
| ], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [ |
| "W" |
| ], |
| "last": "Conrath", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "International Conference Research on Computational Linguistics (ROCLING X)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J. J. Jiang and D. W. Conrath. 1997. Semantic similarity based on corpus statistics and lexical taxonomy. In International Conference Research on Computational Linguistics (ROCLING X), pages 9008+, September.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "How well can passage meaning be derived without using word order? a comparison of latent semantic analysis and humans", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Landauer", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [ |
| "K L" |
| ], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Laham", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Rehder", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "E" |
| ], |
| "last": "Schreiner", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "K. Landauer, T. K. L, D. Laham, B. Rehder, and M. E. Schreiner. 1997. How well can passage meaning be derived without using word order? a comparison of latent semantic analysis and humans.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Automatic evaluation of text coherence: Models and representations", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Lapata", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Barzilay", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the 19th International Joint Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "M. Lapata and R. Barzilay. 2005. Automatic evaluation of text coherence: Models and representations. In Pro- ceedings of the 19th International Joint Conference on Artificial Intelligence, Edinburgh.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Combining local context and WordNet similarity for word sense identification", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Leacock", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Chodorow", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "WordNet: An Electronic Lexical Database", |
| "volume": "", |
| "issue": "", |
| "pages": "305--332", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "C. Leacock and M. Chodorow. 1998. Combining local context and WordNet similarity for word sense identi- fication. In WordNet: An Electronic Lexical Database, pages 305-332.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Automatic sense disambiguation using machine readable dictionaries: how to tell a pine cone from an ice cream cone", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Lesk", |
| "suffix": "" |
| } |
| ], |
| "year": 1986, |
| "venue": "SIGDOC '86: Proceedings of the 5th annual international conference on Systems documentation", |
| "volume": "", |
| "issue": "", |
| "pages": "24--26", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "M. Lesk. 1986. Automatic sense disambiguation us- ing machine readable dictionaries: how to tell a pine cone from an ice cream cone. In SIGDOC '86: Pro- ceedings of the 5th annual international conference on Systems documentation, pages 24-26, New York, NY, USA. ACM.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Automatic evaluation of summaries using n-gram co-occurrence statistics", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of Human Language Technology Conference (HLT-NAACL 2003)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "C. Lin and E. Hovy. 2003. Automatic evaluation of sum- maries using n-gram co-occurrence statistics. In Pro- ceedings of Human Language Technology Conference (HLT-NAACL 2003), Edmonton, Canada, May.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "An information-theoretic definition of similarity", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "Proceedings of the Fifteenth International Conference on Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "296--304", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "D. Lin. 1998. An information-theoretic definition of similarity. In Proceedings of the Fifteenth Interna- tional Conference on Machine Learning, pages 296- 304, Madison, Wisconsin.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Corpus-based and knowledge-based measures of text semantic similarity", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Mihalcea", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Corley", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Strapparava", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the American Association for Artificial Intelligence (AAAI 2006)", |
| "volume": "", |
| "issue": "", |
| "pages": "775--780", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "R. Mihalcea, C. Corley, and C. Strapparava. 2006. Corpus-based and knowledge-based measures of text semantic similarity. In Proceedings of the American Association for Artificial Intelligence (AAAI 2006), pages 775-780, Boston, MA, US.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "WordNet: a Lexical database for English. Communications of the Association for Computing Machinery", |
| "authors": [ |
| { |
| "first": "G", |
| "middle": [ |
| "A" |
| ], |
| "last": "Miller", |
| "suffix": "" |
| } |
| ], |
| "year": 1995, |
| "venue": "", |
| "volume": "38", |
| "issue": "", |
| "pages": "39--41", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "G. A. Miller. 1995. WordNet: a Lexical database for English. Communications of the Association for Com- puting Machinery, 38(11):39-41.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Text-to-text semantic similarity for automatic short answer grading", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Mohler", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Mihalcea", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the European Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "M. Mohler and R. Mihalcea. 2009. Text-to-text seman- tic similarity for automatic short answer grading. In Proceedings of the European Association for Compu- tational Linguistics (EACL 2009), Athens, Greece.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Learning to grade short answer questions using semantic similarity measures and dependency graph alignments", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Mohler", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Bunescu", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Mihalcea", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the Association for Computational Linguistics -Human Language Technologies (ACL-HLT 2011)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "M. Mohler, R. Bunescu, and R. Mihalcea. 2011. Learn- ing to grade short answer questions using semantic similarity measures and dependency graph alignments. In Proceedings of the Association for Computational Linguistics -Human Language Technologies (ACL- HLT 2011), Portland, Oregon, USA.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "External plagiarism detection using information retrieval and sequence alignment: Notebook for PAN at CLEF", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [ |
| "A" |
| ], |
| "last": "Nawab", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Stevenson", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Clough", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 5th International Workshop on Uncovering Plagiarism, Authorship, and Social Software Misuse", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "M. A. Nawab, M. Stevenson, and P. Clough. 2011. External plagiarism detection using information re- trieval and sequence alignment: Notebook for PAN at CLEF 2011. In Proceedings of the 5th International Workshop on Uncovering Plagiarism, Authorship, and Social Software Misuse (PAN 2011).", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Bleu: a method for automatic evaluation of machine translation", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Papineni", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Roukos", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Ward", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "311--318", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "K. Papineni, S. Roukos, T. Ward, and W. Zhu. 2002. Bleu: a method for automatic evaluation of machine translation. In Proceedings of the 40th Annual Meet- ing of the Association for Computational Linguistics, pages 311-318, Philadelphia, PA.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
"title": "WordNet::Similarity - Measuring the Relatedness of Concepts",
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Pedersen", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Patwardhan", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Michelizzi", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the National Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "1024--1025", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
"raw_text": "T. Pedersen, S. Patwardhan, and J. Michelizzi. 2004. WordNet::Similarity - Measuring the Relatedness of Concepts. Proceedings of the National Conference on Artificial Intelligence, pages 1024-1025.",
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Using information content to evaluate semantic similarity in a taxonomy", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Resnik", |
| "suffix": "" |
| } |
| ], |
| "year": 1995, |
| "venue": "Proceedings of the 14th International Joint Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "448--453", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
"raw_text": "P. Resnik. 1995. Using information content to evaluate semantic similarity in a taxonomy. In Proceedings of the 14th International Joint Conference on Artificial Intelligence, pages 448-453.",
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Relevance feedback in information retrieval", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Rocchio", |
| "suffix": "" |
| } |
| ], |
| "year": 1971, |
| "venue": "New Jersey", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
"raw_text": "J. Rocchio, 1971. Relevance feedback in information re- trieval. Prentice Hall, Inc. Englewood Cliffs, New Jer- sey.",
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Term weighting approaches in automatic text retrieval", |
| "authors": [ |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Salton", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Buckley", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Readings in Information Retrieval", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "G. Salton and C. Buckley. 1997. Term weighting ap- proaches in automatic text retrieval. In Readings in Information Retrieval. Morgan Kaufmann Publishers, San Francisco, CA.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "The SMART Retrieval System: Experiments in Automatic Document Processing, chapter Computer evaluation of indexing and text processing", |
| "authors": [ |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Salton", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Lesk", |
| "suffix": "" |
| } |
| ], |
| "year": 1971, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
"raw_text": "G. Salton and M. Lesk, 1971. The SMART Retrieval Sys- tem: Experiments in Automatic Document Processing, chapter Computer evaluation of indexing and text pro- cessing. Prentice Hall, Inc. Englewood Cliffs, New Jersey.",
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Automatic text structuring and summarization. Information Processing and Management", |
| "authors": [ |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Salton", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Singhal", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Mitra", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Buckley", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "", |
| "volume": "2", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "G. Salton, A. Singhal, M. Mitra, and C. Buckley. 1997. Automatic text structuring and summarization. Infor- mation Processing and Management, 2(32).", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Automatic word sense discrimination", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Schutze", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "Computational Linguistics", |
| "volume": "24", |
| "issue": "1", |
| "pages": "97--124", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "H. Schutze. 1998. Automatic word sense discrimination. Computational Linguistics, 24(1):97-124.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "A tutorial on support vector regression. NeuroCOLT2 Technical Report", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Smola", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Schoelkopf", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "A. Smola and B. Schoelkopf. 1998. A tutorial on sup- port vector regression. NeuroCOLT2 Technical Re- port NC2-TR-1998-030.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Mining the Web for Synonyms: PMI-IR versus LSA on TOEFL", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [ |
| "D" |
| ], |
| "last": "Turney", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Proceedings of the 12th European Conference on Machine Learning (ECML'01)", |
| "volume": "", |
| "issue": "", |
| "pages": "491--502", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "P. D. Turney. 2001. Mining the Web for Synonyms: PMI-IR versus LSA on TOEFL. In Proceedings of the 12th European Conference on Machine Learning (ECML'01), pages 491-502, Freiburg, Germany.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Creating subjective and objective sentence classifiers from unannotated texts", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Wiebe", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Riloff", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the 6th international conference on Computational Linguistics and Intelligent Text Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "486--497", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J. Wiebe and E. Riloff. 2005. Creating subjective and objective sentence classifiers from unannotated texts. In Proceedings of the 6th international conference on Computational Linguistics and Intelligent Text Pro- cessing (CICLing 2005), pages 486-497, Mexico City, Mexico.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Annotating expressions of opinions and emotions in language. Language Resources and Evaluation", |
"authors": [
{
"first": "J",
"middle": [],
"last": "Wiebe",
"suffix": ""
},
{
"first": "T",
"middle": [],
"last": "Wilson",
"suffix": ""
},
{
"first": "C",
"middle": [],
"last": "Cardie",
"suffix": ""
}
],
| "year": 2005, |
| "venue": "", |
| "volume": "39", |
| "issue": "", |
| "pages": "165--210", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
"raw_text": "J. Wiebe, T. Wilson, and C. Cardie. 2005. Annotating ex- pressions of opinions and emotions in language. Lan- guage Resources and Evaluation, 39(2-3):165-210.",
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "OpinionFinder: A system for subjectivity analysis", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Wilson", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Hoffmann", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Somasundaran", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Kessler", |
| "suffix": "" |
| }, |
| { |
| "first": "Janyce", |
| "middle": [], |
| "last": "Wiebe", |
| "suffix": "" |
| }, |
| { |
| "first": "Yejin", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "" |
| }, |
| { |
| "first": "Claire", |
| "middle": [], |
| "last": "Cardie", |
| "suffix": "" |
| }, |
| { |
| "first": "Ellen", |
| "middle": [], |
| "last": "Riloff", |
| "suffix": "" |
| }, |
| { |
| "first": "Siddharth", |
| "middle": [], |
| "last": "Patwardhan", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of HLT/EMNLP on Interactive Demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "34--35", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "T. Wilson, P. Hoffmann, S. Somasundaran, J. Kessler, Janyce Wiebe, Yejin Choi, Claire Cardie, Ellen Riloff, and Siddharth Patwardhan. 2005. OpinionFinder: A system for subjectivity analysis. In Proceedings of HLT/EMNLP on Interactive Demonstrations, pages 34-35, Vancouver, BC, Canada.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Verbs semantics and lexical selection", |
| "authors": [ |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Palmer", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "Proceedings of the 32nd annual meeting on Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "133--138", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
"raw_text": "Z. Wu and M. Palmer. 1994. Verbs semantics and lexical selection. In Proceedings of the 32nd annual meeting on Association for Computational Linguistics, pages 133-138, Las Cruces, New Mexico.",
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "Extracting opinion expressions with semi-markov conditional random fields", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Cardie", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "B. Yang and C. Cardie. 2012. Extracting opinion expres- sions with semi-markov conditional random fields. In Proceedings of the conference on Empirical Meth- ods in Natural Language Processing. Association for Computational Linguistics.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF1": { |
"content": "<table><tr><td>tem name) or the learning methodology (second</td></tr><tr><td>word) used: comb.RandSubspace, comb.SVR and</td></tr><tr><td>indv.RandSubspace. For comb, training was per-</td></tr><tr><td>formed on the merged version of the entire 2012 SE-</td></tr><tr><td>MEVAL dataset. For indv, predictions for OnWN</td></tr><tr><td>and SMT test data were based on training on</td></tr><tr><td>matching OnWN and SMT 9 data from 2012, pre-</td></tr><tr><td>dictions for the other test sets were computed using</td></tr><tr><td>the combined version (comb).</td></tr></table>",
| "type_str": "table", |
| "html": null, |
"text": "Evaluation results (Agirre et al., 2013).",
| "num": null |
| }, |
| "TABREF3": { |
| "content": "<table/>", |
| "type_str": "table", |
| "html": null, |
| "text": "Correlation of individual features for the training and test sets with the gold standard.", |
| "num": null |
| } |
| } |
| } |
| } |