ACL-OCL / Base_JSON /prefixE /json /E14 /E14-1044.json
Benjamin Aw
Add updated pkl file v3
6fa4bc9
{
"paper_id": "E14-1044",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T10:39:43.797795Z"
},
"title": "A Knowledge-based Representation for Cross-Language Document Retrieval and Categorization",
"authors": [
{
"first": "Marc",
"middle": [],
"last": "Franco-Salvador",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Sapienza Universit\u00e0 di Roma",
"location": {
"country": "Italy"
}
},
"email": "francosalvador@di.uniroma1.it"
},
{
"first": "Paolo",
"middle": [],
"last": "Rosso",
"suffix": "",
"affiliation": {
"laboratory": "Natural Language Engineering Lab - PRHLT Research Center, Universitat Polit\u00e8cnica de Val\u00e8ncia",
"institution": "",
"location": {
"country": "Spain"
}
},
"email": "prosso@dsic.upv.es"
},
{
"first": "Roberto",
"middle": [],
"last": "Navigli",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Sapienza Universit\u00e0 di Roma",
"location": {
"country": "Italy"
}
},
"email": "navigli@di.uniroma1.it"
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "Current approaches to cross-language document retrieval and categorization are based on discriminative methods which represent documents in a low-dimensional vector space. In this paper we propose a shift from the supervised to the knowledge-based paradigm and provide a document similarity measure which draws on BabelNet, a large multilingual knowledge resource. Our experiments show state-of-the-art results in cross-lingual document retrieval and categorization.",
"pdf_parse": {
"paper_id": "E14-1044",
"_pdf_hash": "",
"abstract": [
{
"text": "Current approaches to cross-language document retrieval and categorization are based on discriminative methods which represent documents in a low-dimensional vector space. In this paper we propose a shift from the supervised to the knowledge-based paradigm and provide a document similarity measure which draws on BabelNet, a large multilingual knowledge resource. Our experiments show state-of-the-art results in cross-lingual document retrieval and categorization.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "The huge amount of text that is available online is becoming ever increasingly multilingual, providing an additional wealth of useful information. Most of this information, however, is not easily accessible to the majority of users because of language barriers which hamper the cross-lingual search and retrieval of knowledge.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Today's search engines would benefit greatly from effective techniques for the cross-lingual retrieval of valuable information that can satisfy a user's needs by not only providing (Landauer and Littman, 1994) and translating (Munteanu and Marcu, 2005) relevant results into different languages, but also by reranking the results in a language of interest on the basis of the importance of search results in other languages.",
"cite_spans": [
{
"start": 181,
"end": 209,
"text": "(Landauer and Littman, 1994)",
"ref_id": "BIBREF17"
},
{
"start": 226,
"end": 252,
"text": "(Munteanu and Marcu, 2005)",
"ref_id": "BIBREF24"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Vector-based models are typically used in the literature for representing documents both in monolingual and cross-lingual settings (Manning et al., 2008) . However, because of the large size of the vocabulary, having each term as a component of the vector makes the document representation very sparse. To address this issue several approaches to dimensionality reduction have been proposed, such as Principal Component Analysis (Jolliffe, 1986) , Latent Semantic Indexing (Hull, 1994) , Latent Dirichlet Allocation (LDA) (Blei et al., 2003) and variants thereof, which project these vectors into a lower-dimensional vector space. In order to enable multilinguality, the vectors of comparable documents written in different languages are concatenated, making up the document matrix which is then reduced using linear projection (Platt et al., 2010; Yih et al., 2011) . However, to do so, comparable documents are needed as training. Additionally, the lower dimensional representations are not of easy interpretation.",
"cite_spans": [
{
"start": 131,
"end": 153,
"text": "(Manning et al., 2008)",
"ref_id": "BIBREF18"
},
{
"start": 429,
"end": 445,
"text": "(Jolliffe, 1986)",
"ref_id": "BIBREF16"
},
{
"start": 473,
"end": 485,
"text": "(Hull, 1994)",
"ref_id": "BIBREF14"
},
{
"start": 522,
"end": 541,
"text": "(Blei et al., 2003)",
"ref_id": null
},
{
"start": 828,
"end": 848,
"text": "(Platt et al., 2010;",
"ref_id": "BIBREF30"
},
{
"start": 849,
"end": 866,
"text": "Yih et al., 2011)",
"ref_id": "BIBREF39"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The availability of wide-coverage lexical knowledge resources extracted automatically from Wikipedia, such as DBPedia (Bizer et al., 2009) , YAGO (Hoffart et al., 2013) and BabelNet (Navigli and Ponzetto, 2012a) , has considerably boosted research in several areas, especially where multilinguality is a concern (Hovy et al., 2013) . Among these latter are cross-language plagiarism detection (Potthast et al., 2011; Franco-Salvador et al., 2013) , multilingual semantic relatedness (Navigli and Ponzetto, 2012b; Nastase and Strube, 2013) and semantic alignment (Navigli and Ponzetto, 2012a; Matuschek and Gurevych, 2013) . One main advantage of knowledge-based methods is that they provide a human-readable, semantically interconnected, representation of the textual item at hand (be it a sentence or a document).",
"cite_spans": [
{
"start": 118,
"end": 138,
"text": "(Bizer et al., 2009)",
"ref_id": "BIBREF1"
},
{
"start": 146,
"end": 168,
"text": "(Hoffart et al., 2013)",
"ref_id": "BIBREF12"
},
{
"start": 182,
"end": 211,
"text": "(Navigli and Ponzetto, 2012a)",
"ref_id": "BIBREF27"
},
{
"start": 312,
"end": 331,
"text": "(Hovy et al., 2013)",
"ref_id": "BIBREF13"
},
{
"start": 393,
"end": 416,
"text": "(Potthast et al., 2011;",
"ref_id": "BIBREF32"
},
{
"start": 417,
"end": 446,
"text": "Franco-Salvador et al., 2013)",
"ref_id": "BIBREF8"
},
{
"start": 483,
"end": 512,
"text": "(Navigli and Ponzetto, 2012b;",
"ref_id": "BIBREF28"
},
{
"start": 513,
"end": 538,
"text": "Nastase and Strube, 2013)",
"ref_id": "BIBREF25"
},
{
"start": 562,
"end": 591,
"text": "(Navigli and Ponzetto, 2012a;",
"ref_id": "BIBREF27"
},
{
"start": 592,
"end": 621,
"text": "Matuschek and Gurevych, 2013)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Following this trend, in this paper we provide a knowledge-based representation of documents which goes beyond the lexical surface of text, while at the same time avoiding the need for training in a cross-language setting. To achieve this we leverage a multilingual semantic network, i.e., BabelNet, to obtain language-independent representations, which contain concepts together with semantic relations between them, and also include semantic knowledge which is just implied by the input text. The integration of our multilingual graph model with a vector representation enables us to obtain state-of-the-art results in comparable document retrieval and cross-language text categorization.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The mainstream representation of documents for monolingual and cross-lingual document retrieval is vector-based. A document vector, whose components quantify the relevance of each term in the document, is usually highly dimensional, because of the variety of terms used in a document collection. As a consequence, the resulting document matrices are very sparse. To address the data sparsity issue, several approaches to the reduction of dimensionality of document vectors have been proposed in the literature. A popular class of methods is based on linear projection, which provides a low-dimensional mapping from a high dimensional vector space. A historical approach to linear projection is Principal Component Analysis (PCA) (Jolliffe, 1986) , which performs a singular value decomposition (SVD) on a document matrix D of size n \u00d7 m, where each row in D is the term vector representation of a document. PCA uses an orthogonal transformation to convert a set of observations of possibly correlated variables into a set of values of linearly uncorrelated variables called principal components, which make up the low-dimensional vector. Latent Semantic Analysis (LSA) (Deerwester et al., 1990 ) is very similar to PCA but performs the SVD using the correlation matrix instead of the covariance matrix, which implies a lower computational cost. LSA preserves the amount of variance in an eigenvector v by maximizing its Rayleigh ratio:",
"cite_spans": [
{
"start": 729,
"end": 745,
"text": "(Jolliffe, 1986)",
"ref_id": "BIBREF16"
},
{
"start": 1169,
"end": 1193,
"text": "(Deerwester et al., 1990",
"ref_id": "BIBREF4"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "\\frac{v^T C v}{v^T v}, where C = D^T D is the correlation matrix of D.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "A generalization of PCA, called Oriented Principal Component Analysis (OPCA) (Diamantaras and Kung, 1996) , is based on a noise covariance matrix to project the similar components of D closely. Other projection models such as Latent Dirichlet Allocation (LDA) (Blei et al., 2003) are based on the extraction of generative models from documents. Another approach, named Explicit Semantic Analysis (ESA) (Gabrilovich and Markovitch, 2007) , represents each document by its similarities to a document collection. Using a low domain specificity document collection such as Wikipedia, the model has proven to obtain competitive results.",
"cite_spans": [
{
"start": 77,
"end": 105,
"text": "(Diamantaras and Kung, 1996)",
"ref_id": "BIBREF5"
},
{
"start": 260,
"end": 279,
"text": "(Blei et al., 2003)",
"ref_id": null
},
{
"start": 402,
"end": 436,
"text": "(Gabrilovich and Markovitch, 2007)",
"ref_id": "BIBREF9"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "Not only have these methods proven to be successful in a monolingual scenario (Deerwester et al., 1990; Hull, 1994) , but they have also been adapted to perform well in tasks at a crosslanguage level (Potthast et al., 2008; Platt et al., 2010; Yih et al., 2011) . Cross-language Latent Semantic Indexing (CL-LSI) (Dumais et al., 1997) was the first linear projection approach used in cross-lingual tasks. CL-LSI provides a crosslingual representation for documents by reducing the dimensionality of a matrix D whose rows are obtained by concatenating comparable documents from different languages. Similarly, PCA and OPCA can be adapted to a multilingual setting. LDA was also adapted to perform in a multilingual scenario with models such as Polylingual Topic Models (Mimno et al., 2009) , Joint Probabilistic LSA and Coupled Probabilistic LSA (Platt et al., 2010) , which, however, are constrained to using word counts, instead of better weighting strategies, such as log(tf)-idf, known to perform better with large vocabularies (Salton and McGill, 1986) . Another variant, named Canonical Correlation Analysis (CCA) (Thompson, 2005) , uses a cross-covariance matrix of the low-dimensional vectors to find the projections. Cross-language Explicit Semantic Analysis (CL-ESA) (Potthast et al., 2008; Cimiano et al., 2009; Potthast et al., 2011) , instead, adapts ESA to be used at crosslanguage level by exploiting the comparable documents across languages from Wikipedia. CL-ESA represents each document written in a language L by its similarities with a document collection in the same language L. Using a multilingual document collection with comparable documents across languages, the resulting vectors from different languages can be compared directly.",
"cite_spans": [
{
"start": 78,
"end": 103,
"text": "(Deerwester et al., 1990;",
"ref_id": "BIBREF4"
},
{
"start": 104,
"end": 115,
"text": "Hull, 1994)",
"ref_id": "BIBREF14"
},
{
"start": 200,
"end": 223,
"text": "(Potthast et al., 2008;",
"ref_id": "BIBREF31"
},
{
"start": 224,
"end": 243,
"text": "Platt et al., 2010;",
"ref_id": "BIBREF30"
},
{
"start": 244,
"end": 261,
"text": "Yih et al., 2011)",
"ref_id": "BIBREF39"
},
{
"start": 313,
"end": 334,
"text": "(Dumais et al., 1997)",
"ref_id": "BIBREF6"
},
{
"start": 768,
"end": 788,
"text": "(Mimno et al., 2009)",
"ref_id": "BIBREF22"
},
{
"start": 845,
"end": 865,
"text": "(Platt et al., 2010)",
"ref_id": "BIBREF30"
},
{
"start": 1031,
"end": 1056,
"text": "(Salton and McGill, 1986)",
"ref_id": "BIBREF35"
},
{
"start": 1119,
"end": 1135,
"text": "(Thompson, 2005)",
"ref_id": "BIBREF37"
},
{
"start": 1276,
"end": 1299,
"text": "(Potthast et al., 2008;",
"ref_id": "BIBREF31"
},
{
"start": 1300,
"end": 1321,
"text": "Cimiano et al., 2009;",
"ref_id": "BIBREF3"
},
{
"start": 1322,
"end": 1344,
"text": "Potthast et al., 2011)",
"ref_id": "BIBREF32"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "An alternative unsupervised approach, Crosslanguage Character n-Grams (CL-CNG) (Mcnamee and Mayfield, 2004), does not draw upon linear projections and represents documents as vectors of character n-grams. It has proven to obtain good results in cross-language document retrieval (Potthast et al., 2011) between languages with lexical and syntactic similarities.",
"cite_spans": [
{
"start": 279,
"end": 302,
"text": "(Potthast et al., 2011)",
"ref_id": "BIBREF32"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "Recently, a novel supervised linear projection model based on Siamese Neural Networks (S2Net) (Yih et al., 2011) achieved state-of-theart performance in comparable document retrieval. S2Net performs a linear combination of the terms of a document vector d to obtain a reduced vector r, which is the output layer of a neural network. Each element in r has a weight which is a linear combination of the original weights of d, and captures relationships between the original terms.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "However, linear projection approaches need a high number of training documents to achieve state-of-the-art performance (Platt et al., 2010; Yih et al., 2011) . Moreover, although they are good at identifying a few principal components, the representations produced are opaque, in that they cannot explicitly model the semantic content of documents with a human-interpretable representation, thereby making the data analysis difficult. In this paper, instead, we propose a languageindependent knowledge graph representation for documents which is obtained from a large multilingual semantic network, without using any training information. Our knowledge graph representation explicitly models the semantics of the document in terms of the concepts and relations evoked by its co-occurring terms.",
"cite_spans": [
{
"start": 119,
"end": 139,
"text": "(Platt et al., 2010;",
"ref_id": "BIBREF30"
},
{
"start": 140,
"end": 157,
"text": "Yih et al., 2011)",
"ref_id": "BIBREF39"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "We propose a knowledge-based document representation aimed at expanding the terms in a document's bag of words by means of a knowledge graph which provides concepts and semantic relations between them. Key to our approach is the use of a graph representation which does not depend on any given language, but, indeed, is multilingual. To build knowledge graphs of this kind we utilize BabelNet, a multilingual semantic network that we present in Section 3.1. Then, in Section 3.2, we describe the five steps needed to obtain our graph-based multilingual representation of documents. Finally, we introduce our knowledge graph similarity measure in Section 3.3.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "A Knowledge-based Document Representation",
"sec_num": "3"
},
{
"text": "BabelNet (Navigli and Ponzetto, 2012a ) is a multilingual semantic network whose concepts and relations are obtained from the largest available semantic lexicon of English, WordNet (Fellbaum, 1998) , and the largest wide-coverage collaboratively-edited encyclopedia, Wikipedia, by means of an automatic mapping algorithm. Ba-belNet is therefore a multilingual \"encyclopedic dictionary\" that combines lexicographic information with wide-coverage encyclopedic knowledge. Concepts in BabelNet are represented similarly to WordNet, i.e., by grouping sets of synonyms in the different languages into multilingual synsets. Multilingual synsets contain lexicalizations from WordNet synsets, the corresponding Wikipedia pages and additional translations output by a statistical machine translation system. The relations between synsets are collected from WordNet and from Wikipedia's hyperlinks between pages.",
"cite_spans": [
{
"start": 9,
"end": 37,
"text": "(Navigli and Ponzetto, 2012a",
"ref_id": "BIBREF27"
},
{
"start": 181,
"end": 197,
"text": "(Fellbaum, 1998)",
"ref_id": "BIBREF7"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "BabelNet",
"sec_num": "3.1"
},
{
"text": "We note that, in principle, we could use any multilingual network providing a similar kind of information, e.g., EuroWordNet (Vossen, 2004) . However, in our work we chose BabelNet because of its larger size, its coverage of both lex-icographic and encyclopedic knowledge, and its free availability. 1 In our work we used BabelNet 1.0, which encodes knowledge for six languages, namely: Catalan, English, French, German, Italian and Spanish.",
"cite_spans": [
{
"start": 125,
"end": 139,
"text": "(Vossen, 2004)",
"ref_id": "BIBREF38"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "BabelNet",
"sec_num": "3.1"
},
{
"text": "We now introduce our five-step method for representing a given document d from a collection D of documents written in language L as a languageindependent knowledge graph.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "From Document to Knowledge Graph",
"sec_num": "3.2"
},
{
"text": "Building a Basic Vector Representation Initially we transform a document d into a traditional vector representation. To do this, we score each term t i \u2208 d with a weight w i . This weight is usually a function of term and document frequency. Following the literature, one method that works well is the log tf-idf weighting (Salton et al., 1983; Salton and McGill, 1986) :",
"cite_spans": [
{
"start": 323,
"end": 344,
"text": "(Salton et al., 1983;",
"ref_id": "BIBREF36"
},
{
"start": 345,
"end": 369,
"text": "Salton and McGill, 1986)",
"ref_id": "BIBREF35"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "From Document to Knowledge Graph",
"sec_num": "3.2"
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "w_i = \\log_2(f_i + 1) \\log_2(n/n_i).",
"eq_num": "(1)"
}
],
"section": "From Document to Knowledge Graph",
"sec_num": "3.2"
},
{
"text": "where f i is the number of times term i occurs in document d, n is the total number of documents in the collection and n i is the number of documents that contain t i . We then create a weighted term vector v = (w 1 , ..., w n ), where w i is the weight corresponding to term t i . We exclude stopwords from the vector.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "From Document to Knowledge Graph",
"sec_num": "3.2"
},
{
"text": "Selecting the Relevant Document Terms We then create the set T of base forms, i.e., lemmas 2 , of the terms in the document d. In order to keep only the most relevant terms, we sort the terms T according to their weight in vector v and retain a maximum number of K terms, obtaining a set of terms T K . 3 The value of K is calculated as a function of the vector size, as follows:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "From Document to Knowledge Graph",
"sec_num": "3.2"
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "K = (\\log_2(1 + |\\vec{v}|))^2,",
"eq_num": "(2)"
}
],
"section": "From Document to Knowledge Graph",
"sec_num": "3.2"
},
{
"text": "The rationale is that K must be high enough to ensure a good conceptual representation but not too high, so as to avoid as much noise as possible in the set T K .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "From Document to Knowledge Graph",
"sec_num": "3.2"
},
{
"text": "Populating the Graph with Initial Concepts Next, we create an initially-empty knowledge graph G = (V, E), i.e., such that V = E = \u2205.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "From Document to Knowledge Graph",
"sec_num": "3.2"
},
{
"text": "We populate the vertex set V with the set S K of all the synsets in BabelNet which contain any term in T K in the document language L, that is: Figure 1 : (a) initial graph from T K = {\"European\", \"apple\", \"tree\", \"Malus\", \"species\", \"America\"}; (b) knowledge graph obtained by retrieving all paths from BabelNet. Gray nodes are the original concepts.",
"cite_spans": [],
"ref_spans": [
{
"start": 144,
"end": 152,
"text": "Figure 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "From Document to Knowledge Graph",
"sec_num": "3.2"
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "S_K = \\bigcup_{t \\in T_K} Synsets_L(t),",
"eq_num": "(3)"
}
],
"section": "From Document to Knowledge Graph",
"sec_num": "3.2"
},
{
"text": "where Synsets L (t) is the set of synsets in Ba-belNet which contain a term t in the language of interest L. For example, in Figure 1 (a) we show the initial graph obtained from the set T K = {\"European\", \"apple\", \"tree\", \"Malus\", \"species\", \"America\"}. Note, however, that each retrieved synset is multilingual, i.e., it contains lexicalizations for the same concept in other languages too. Therefore, the nodes of our knowledge graph provide a language-independent representation of the document's content.",
"cite_spans": [],
"ref_spans": [
{
"start": 125,
"end": 133,
"text": "Figure 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "From Document to Knowledge Graph",
"sec_num": "3.2"
},
{
"text": "Creating the Knowledge Graph Similarly to Navigli and Lapata (2010), we create the knowledge graph by searching BabelNet for paths connecting pairs of synsets in V . Formally, for each",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "From Document to Knowledge Graph",
"sec_num": "3.2"
},
{
"text": "pair v, v' \\in V such that v and v' do not share any lexicalization (see footnote 4) in T_K, for each path in BabelNet v \\rightarrow v_1 \\rightarrow \\ldots \\rightarrow v_n \\rightarrow v', we set: V := V \\cup \\{v_1, \\ldots, v_n\\} and E := E \\cup \\{(v, v_1), \\ldots, (v_n, v')\\},",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "From Document to Knowledge Graph",
"sec_num": "3.2"
},
{
"text": "that is, we add all the path vertices and edges to G. After prototyping, the path length is limited to maximum length 3, so as to avoid an excessive semantic drift. As a result of populating the graph with intermediate edges and vertices, we obtain a knowledge graph which models the semantic context of document d. We point out that our knowledge graph might have different isolated components. We view each component as a different interpretation of document d. To select the main interpretation, we keep only the largest component, i.e., the one with the highest number of vertices, which we consider as the most likely semantic representation of the document content. Figure 1(b) shows the knowledge graph obtained for our example term set. Note that our approach retains, and therefore weights, only the subgraph focused on the \"apple fruit\" meaning.",
"cite_spans": [],
"ref_spans": [
{
"start": 672,
"end": 683,
"text": "Figure 1(b)",
"ref_id": null
}
],
"eq_spans": [],
"section": "From Document to Knowledge Graph",
"sec_num": "3.2"
},
{
"text": "Knowledge Graph Weighting The final step consists of weighting all the concepts and semantic relations of the knowledge graph G. For weighting relations we use the original weights from BabelNet, which provide the degree of relatedness between the synset end points of each edge (Navigli and Ponzetto, 2012a) . As for concepts, we weight them on the basis of the original weights of the terms in the vector v. In order to score each concept in our knowledge graph G, we applied the topic-sensitive PageRank algorithm (Haveliwala et al., 2003) to G. While the well-known PageRank algorithm (Page et al., 1998) calculates the global importance of vertices in a graph, topic-sensitive PageRank is a variant in which the importance of vertices is biased using a set of representative \"topics\". Formally, the topic-sensitive PageRank vector p is calculated by means of an iterative process until convergence as follows: p = cM p+(1\u2212c) u, where c is the damping factor (conventionally set to 0.85), 1 \u2212 c represents the probability of a surfer randomly jumping to any node in the graph, M is the transition probability matrix of graph G, with M ji = degree(i) \u22121 if an edge from i to j exists, 0 otherwise, u is the random-jumping transition probability vector, where each u i represents the probability of jumping randomly to the node i, and p is the resulting PageRank vector which scores the nodes of G. In contrast to vanilla PageRank, the \"topic-sensitive\" variant gives more probability mass to some nodes in G and less to others. In our case we perturbate u by concentrating the probability mass to the vertices in S K , which are the synsets corresponding to the document terms T K (cf. Formula 3).",
"cite_spans": [
{
"start": 279,
"end": 308,
"text": "(Navigli and Ponzetto, 2012a)",
"ref_id": "BIBREF27"
},
{
"start": 517,
"end": 542,
"text": "(Haveliwala et al., 2003)",
"ref_id": "BIBREF10"
},
{
"start": 589,
"end": 608,
"text": "(Page et al., 1998)",
"ref_id": "BIBREF29"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "From Document to Knowledge Graph",
"sec_num": "3.2"
},
{
"text": "We can now determine the similarity between two documents d, d \u2208 D in terms of the similarity of their knowledge graph representations G and G .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Similarity between Knowledge Graphs",
"sec_num": "3.3"
},
{
"text": "Following the literature (Montes y G\u00f3mez et al., 2001) we calculate the similarity between the vertex sets in the two graphs using Dice's coefficient (Jackson et al., 1989) : Figure 2 : Knowledge graph examples from two comparable documents in different languages.",
"cite_spans": [
{
"start": 150,
"end": 172,
"text": "(Jackson et al., 1989)",
"ref_id": "BIBREF15"
}
],
"ref_spans": [
{
"start": 175,
"end": 183,
"text": "Figure 2",
"ref_id": null
}
],
"eq_spans": [],
"section": "Similarity between Knowledge Graphs",
"sec_num": "3.3"
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "Sc(G, G ) = 2 \u2022 c\u2208V (G)\u2229V (G ) w(c) c\u2208V (G) w(c) + c\u2208V (G ) w(c) ,",
"eq_num": "(4)"
}
],
"section": "Similarity between Knowledge Graphs",
"sec_num": "3.3"
},
{
"text": "where w(c) is the weight of a concept c (see Section 3.2). Likewise, we calculate the similarity between the two edge sets as:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Similarity between Knowledge Graphs",
"sec_num": "3.3"
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "S_r(G, G') = \\frac{2 \\cdot \\sum_{r \\in E(G) \\cap E(G')} w(r)}{\\sum_{r \\in E(G)} w(r) + \\sum_{r \\in E(G')} w(r)},",
"eq_num": "(5)"
}
],
"section": "Similarity between Knowledge Graphs",
"sec_num": "3.3"
},
{
"text": "where w(r) is the weight of a semantic relation edge r. We combine the two above measures of conceptual (S c ) and relational (S r ) similarity to obtain an integrated measure S g (G, G ) between knowledge graphs:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Similarity between Knowledge Graphs",
"sec_num": "3.3"
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "S_g(G, G') = \\frac{S_c(G, G') + S_r(G, G')}{2}.",
"eq_num": "(6)"
}
],
"section": "Similarity between Knowledge Graphs",
"sec_num": "3.3"
},
{
"text": "Notably, since we are working with a languageindependent representation of documents, this similarity measure can be applied to the knowledge graphs built from documents written in any language. In Figure 2 we show two knowledge graphs for comparable documents written in different languages (for clarity, labels are in English in both graphs). As expected, the graphs share several key concepts and relations.",
"cite_spans": [],
"ref_spans": [
{
"start": 198,
"end": 206,
"text": "Figure 2",
"ref_id": null
}
],
"eq_spans": [],
"section": "Similarity between Knowledge Graphs",
"sec_num": "3.3"
},
{
"text": "Since our knowledge graphs will only cover the most central concepts of a document, we complement this core representation with a more traditional vector-based representation. However, as we are interested in the cross-language comparison of documents, we translate our monolingual vector v L of a document d written in language L into its corresponding vector v L in language L Algorithm 1 Dictionary-based term-vector translation.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "From Document to Multilingual Vector",
"sec_num": "4.1"
},
{
"text": "Input: a weighted document vector vL = (w1, . . . , wn), a source language L and a target language L Output: a translated vector v L 1: v L \u2190 (0, . . . , 0) of length n 2: for i = 1 to n 3: if wi = 0 continue 4: // let ti be the term corresponding to wi in vL 5: SL \u2190 Synsets L (ti) 6: for each synset s \u2208 SL 7:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "From Document to Multilingual Vector",
"sec_num": "4.1"
},
{
"text": "T \u2190 getTranslations(s, L ) 8:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "From Document to Multilingual Vector",
"sec_num": "4.1"
},
{
"text": "if T = \u2205 then 9:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "From Document to Multilingual Vector",
"sec_num": "4.1"
},
{
"text": "for each tr \u2208 T 10: wnew = wi \u2022 confidence(tr, ti) 11: // let index(tr) be the index of tr in vL 12:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "From Document to Multilingual Vector",
"sec_num": "4.1"
},
{
"text": "if \u2203 index(tr) then 13:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "From Document to Multilingual Vector",
"sec_num": "4.1"
},
{
"text": "v L (index(tr)) = wnew 14: return v L using BabelNet as our multilingual dictionary. We detail the document-vector translation process in Algorithm 1.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "From Document to Multilingual Vector",
"sec_num": "4.1"
},
{
"text": "The translated vector v L is obtained as follows: for each term t i with non-zero weight in v L we obtain all the possible meanings of t i in BabelNet (see line 5) and, for each of these, we retrieve all the translations (line 7), i.e., lexicalizations of the concept, in language L available in the synset. We set a non-zero value in the translation vector v L , 5 in correspondence with each such translation tr, proportional to the weight of t i in the original vector and the confidence of the translation (line 10), as provided by the BabelNet semantic network. 6 In order to increase the amount of information available in the vector and counterbalance possible wrong translations, we avoid translating all vectors to one language. Instead, in the present work we create a multilingual vector representation of a document d written in language L by concatenating the corresponding vector v L with the translated vector v L of d for language L . As a result, we obtain a multilingual vector v LL , which contains lexicalizations in both languages.",
"cite_spans": [
{
"start": 567,
"end": 568,
"text": "6",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "From Document to Multilingual Vector",
"sec_num": "4.1"
},
{
"text": "Following common practice for document similarity in the literature (Manning et al., 2008) , we use the cosine similarity as the similarity measure between multilingual vectors:",
"cite_spans": [
{
"start": 68,
"end": 90,
"text": "(Manning et al., 2008)",
"ref_id": "BIBREF18"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Similarity between Multilingual Vectors",
"sec_num": "4.2"
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "Sv( v LL , v LL ) = v LL \u2022 v LL || v LL || || v LL || .",
"eq_num": "(7)"
}
],
"section": "Similarity between Multilingual Vectors",
"sec_num": "4.2"
},
{
"text": "5 Knowledge-based Document Similarity",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Similarity between Multilingual Vectors",
"sec_num": "4.2"
},
{
"text": "Given a source document d and a target document d , we calculate the similarities between the respective knowledge-graph and multilingual vector representations, and combine them to obtain a knowledge-based similarity as follows:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Similarity between Multilingual Vectors",
"sec_num": "4.2"
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "KBSim(d, d ) = c(G)Sg(G, G ) + (1 \u2212 c(G))Sv( v LL , v LL ),",
"eq_num": "(8)"
}
],
"section": "Similarity between Multilingual Vectors",
"sec_num": "4.2"
},
{
"text": "where c(G) is an interpolation factor calculated as the edge density of knowledge graph G:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Similarity between Multilingual Vectors",
"sec_num": "4.2"
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "c(G) = |E(G)| |V (G)|(|V (G)| \u2212 1) .",
"eq_num": "(9)"
}
],
"section": "Similarity between Multilingual Vectors",
"sec_num": "4.2"
},
{
"text": "Note that, using the factor c(G) to interpolate the two similarities in Eq. 8, we determine the relevance for the knowledge graphs and the multilingual vectors in a dynamic way. Indeed, c(G) makes the contribution of graph similarity depend on the richness of the knowledge graph.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Similarity between Multilingual Vectors",
"sec_num": "4.2"
},
{
"text": "In this section we compare our knowledgebased document similarity measure, KBSim, against state-of-the-art models on two different tasks: comparable document retrieval and crosslingual text categorization.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluation",
"sec_num": "6"
},
{
"text": "In our first experiment we determine the effectiveness of our knowledge-based approach in a comparable document retrieval task. Given a document d written in language L and a collection D L of documents written in another language L , the task of comparable document retrieval consists of finding the document in D L which is most similar to d, under the assumption that there exists one document d \u2208 D L which is comparable with d.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Comparable Document Retrieval",
"sec_num": "6.1"
},
{
"text": "Dataset We followed the experimental setting described in (Platt et al., 2010; Yih et al., 2011) and evaluated KBSim on the Wikipedia dataset made available by the authors of those papers. The dataset is composed of Wikipedia comparable encyclopedic entries in English and Spanish. For each document in English there exists a \"real\" pair in Spanish which was defined as a comparable entry by the Wikipedia user community. The dataset of each language was split into three parts: 43,380 training, 8,675 development and 8,675 test documents. The documents were tokenized, without stemming, and represented as vectors using a log(tf)-idf weighting (Salton and Buckley, 1988) . The vocabulary of the corpus was restricted to 20,000 terms, which were the most frequent terms in the two languages after removing the top 50 terms.",
"cite_spans": [
{
"start": 58,
"end": 78,
"text": "(Platt et al., 2010;",
"ref_id": "BIBREF30"
},
{
"start": 79,
"end": 96,
"text": "Yih et al., 2011)",
"ref_id": "BIBREF39"
},
{
"start": 645,
"end": 671,
"text": "(Salton and Buckley, 1988)",
"ref_id": "BIBREF34"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Corpus and Task Setting",
"sec_num": "6.1.1"
},
{
"text": "Methodology To evaluate the models we compared each English document against the Spanish dataset and vice versa. Following the original setting, the results are given as the average performance between these two experiments. For evaluation we employed the averaged top-1 accuracy and Mean Reciprocal Rank (MMR) at finding the real comparable document in the other language. We compared KBSim against the state-of-the-art supervised models S2Net, OPCA, CCA, and CL-LSI (cf. Section 2). In contrast to these models, KBSim does not need a training step, so we applied it directly to the testing partition.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Corpus and Task Setting",
"sec_num": "6.1.1"
},
{
"text": "In addition we also included the results of CL-ESA 7 , CL-C3G 8 and two simple vector-based models which translate all documents into English on a word-by-word basis and compared them using cosine similarity: the first model (CosSim E ) uses a statistical dictionary trained with Europarl using Wavelet-Domain Hidden Markov Models (He, 2007) , a model similar to IBM Model 4; the second model (CosSim BN ) instead uses Algorithm 1 to translate the vectors with BabelNet.",
"cite_spans": [
{
"start": 331,
"end": 341,
"text": "(He, 2007)",
"ref_id": "BIBREF11"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Corpus and Task Setting",
"sec_num": "6.1.1"
},
{
"text": "As we can see from Table 1 , 9 the CosSim BN model, which uses BabelNet to translate the document vectors, achieves better results than CCA and CL-LSI. We hypothesize that this is due to these linear projection models losing information during the projection. CosSim E yields results similar to CosSim BN , showing that BabelNet is a good alternative statistical dictionary. In contrast to CCA and CL-LSI, OPCA performs better thanks to its improved projection method using a noise covariance matrix, which enables it to obtain the main components in a low-dimensional space. CL-C3G and CL-ESA obtain the lowest results. Considering that English and Spanish do not have many lexical similarities, the low performance of CL-C3G is justified because these languages do not share many character n-grams. The reason behind the low results of CL-ESA can be explained by the low number of intersecting concepts between Spanish and English in Wikipedia, as confirmed by Potthast et al. (2008) . Despite both using Wikipedia in some way, KBSim obtains much higher performance than CL-ESA thanks to the use of our multilingual knowledge graph representation of documents, which makes it possible to expand and semantically relate its original concepts. As a result, in contrast to CL-ESA, KB-Sim can integrate conceptual and relational similarity functions which provide more accurate performance. Interestingly, KBSim also outperforms OPCA which, in contrast to our system, is supervised, and in terms of accuracy is only 1 point below S2Net, the supervised state-of-the-art model using neural networks.",
"cite_spans": [
{
"start": 963,
"end": 985,
"text": "Potthast et al. (2008)",
"ref_id": "BIBREF31"
}
],
"ref_spans": [
{
"start": 19,
"end": 26,
"text": "Table 1",
"ref_id": "TABREF1"
}
],
"eq_spans": [],
"section": "Results",
"sec_num": "6.1.2"
},
{
"text": "The second task in which we tested the different models was cross-language text categorization. The task is defined as follows: given a document d L in a language L and a corpus D L with documents in a different language L , and C possible categories, a system has to classify d L into one of the categories C using the labeled collection D L .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Cross-language Text Categorization",
"sec_num": "6.2"
},
{
"text": "Dataset To perform this task we used the Multilingual Reuters Collection (Amini et al., 2009) , which is composed of five datasets of news from five different languages (English, French, German, Spanish and Italian) and classified into six possi- (Platt et al., 2010) .",
"cite_spans": [
{
"start": 73,
"end": 93,
"text": "(Amini et al., 2009)",
"ref_id": "BIBREF0"
},
{
"start": 247,
"end": 267,
"text": "(Platt et al., 2010)",
"ref_id": "BIBREF30"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Corpus and Task Setting",
"sec_num": "6.2.1"
},
{
"text": "ble categories. In addition, each dataset of news is translated into the other four languages using the Portage translation system (Sadat et al., 2005) .",
"cite_spans": [
{
"start": 131,
"end": 151,
"text": "(Sadat et al., 2005)",
"ref_id": "BIBREF33"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Corpus and Task Setting",
"sec_num": "6.2.1"
},
{
"text": "As a result, we have five different multilingual datasets, each containing source news documents in one language and four sets of translated documents in the other languages. Each of the languages has an independent vocabulary. Document vectors in the collection are created using TFIDFbased weighting.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Corpus and Task Setting",
"sec_num": "6.2.1"
},
{
"text": "Methodology To evaluate our approach we used the English and Spanish news datasets. From the English news dataset we randomly selected 13,131 news as training and 1,875 as test documents. From the Spanish news dataset we selected all 12,342 news as test documents. To classify both test sets we used the English news training set. We performed the experiment at cross-lingual level using Spanish and English languages available for both Spanish and English news datasets, therefore we classified each test set selecting the documents in English and using the Spanish documents in the training dataset, and vice versa. We followed Platt et al. (2010) and averaged the values obtained from the two comparisons for each test set to obtain the final result. To categorize the documents we applied k-NN to the ranked list of documents according to the similarity measure employed for each model. We evaluated each model by estimating its accuracy in the classification of the English and Spanish test sets.",
"cite_spans": [
{
"start": 630,
"end": 649,
"text": "Platt et al. (2010)",
"ref_id": "BIBREF30"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Corpus and Task Setting",
"sec_num": "6.2.1"
},
{
"text": "We compared our approach against the stateof-the-art supervised models in this task: OPCA, CCA and CL-LSI (Platt et al., 2010) . In addition, we include the results of the CosSim BN and CosSim E models that we introduced in Section 6.1.1, as well as the results of a full statistical machine translation system trained with Europarl and post-processed by LSA (Full MT), as reported by Platt et al. (2010) . Table 2 shows the cross-language text categorization accuracy. CosSim E obtained the lowest results. This is because there is a significant number of untranslated terms in the translation process that the statistical dictionary cannot cover. This is not the case in the CosSim BN model which achieves higher results using BabelNet as a statistical dictionary, especially on the Spanish news corpus.",
"cite_spans": [
{
"start": 106,
"end": 126,
"text": "(Platt et al., 2010)",
"ref_id": "BIBREF30"
},
{
"start": 385,
"end": 404,
"text": "Platt et al. (2010)",
"ref_id": "BIBREF30"
}
],
"ref_spans": [
{
"start": 407,
"end": 414,
"text": "Table 2",
"ref_id": "TABREF3"
}
],
"eq_spans": [],
"section": "Corpus and Task Setting",
"sec_num": "6.2.1"
},
{
"text": "On the other hand, however, the linear projection methods as well as Full MT obtained the highest results on the English corpus. The differences between the linear projection methods are evident when looking at the Spanish corpus results; OPCA performed best with a considerable improvement, which indicates again that it is one of the most effective linear projection methods. Finally, our approach, KBSim, obtained competitive results on the English corpus, performing best among the unsupervised systems, and the highest results on the Spanish news, surpassing all alternatives.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Results",
"sec_num": "6.2.2"
},
{
"text": "Since KBSim does not need any training for document comparison, and because it based, moreover, on a multilingual lexical resource, we performed an additional experiment to demonstrate its ability to carry out the same text categorization task in many languages. To do this, we used the Multilingual Reuters Collection to create a 3,000 document test dataset and 9,000 training dataset 10 for five languages: English, German, Spanish, French and Italian. Then we calculated the classification accuracy on each test set using each training set. Results are shown in Table 3 .",
"cite_spans": [],
"ref_spans": [
{
"start": 565,
"end": 572,
"text": "Table 3",
"ref_id": "TABREF5"
}
],
"eq_spans": [],
"section": "Results",
"sec_num": "6.2.2"
},
{
"text": "The best results for each language were obtained when working at the monolingual level, which suggests that KBSim might be a good untrained alternative in monolingual tasks, too. In general, cross-language comparisons produced similar results, demonstrating the general applicability of KBSim to arbitrary language pairs in multilingual text categorization. However, we note that German, Italian and Spanish training partitions produced low results compared to the others. After analyzing the length of the documents in the different datasets we discovered that they have different average lengths in words: 79 (EN), 76 (FR), 75 (DE), 60 (ES) and 55 (IT). German, Spanish and especially Italian documents have the lowest average length, which makes it more difficult to build a representative knowledge graph of the content of each document when it is performing at cross-language level. ",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Results",
"sec_num": "6.2.2"
},
{
"text": "In this paper we introduced a knowledge-based approach to represent and compare documents written in different languages. The two main contributions of this work are: i) a new graphbased model for the language-independent representation of documents based on the Babel-Net multilingual semantic network; ii) KBSim, a knowledge-based cross-language similarity measure between documents, which integrates our multilingual graph-based model with a traditional vector representation.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusions",
"sec_num": "7"
},
{
"text": "In two different cross-lingual tasks, i.e., comparable document retrieval and cross-language text categorization, KBSim has proven to perform on a par or better than the supervised state-of-the-art models which make use of linear projections to obtain the main components of the term vectors. We remark that, in contrast to the best systems in the literature, KBSim does not need any parameter tuning phase nor does it use any training information. Moreover, when scaling to many languages, supervised systems need to be trained on each pair, which can be very costly.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusions",
"sec_num": "7"
},
{
"text": "The gist of our approach is in the knowledge graph representation of documents, which relates the original terms using expanded concepts and relations from BabelNet. The knowledge graphs also have the nice feature of being humaninterpretable, a feature that we want to exploit in future work. We will also explore the integration of linear projection models, such as OPCA and S2Net, into our multilingual vector-based similarity measure. Also, to ensure a level playing field, following the competing models, in this work we did not use multi-word expressions as vector components. We will study their impact on KBSim in future work.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusions",
"sec_num": "7"
},
{
"text": "http://babelnet.org 2 Following the setup of(Platt et al., 2010), our initial data is represented using term vectors. For this reason we lemmatize in this step.3 Since the vector v provides weights for all the word forms, and not only lemmas, occurring in d, we take the best weight among those word forms of the considered lemma.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "This prevents different senses of the same term from being connected via a path in the resulting knowledge graph.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "To make the translation possible, while at the same time keeping the same number of dimensions in our vector representation, we use a shared vocabulary which covers both languages. See Section 6 for details on the experimental setup.6 Non-English lexicalizations in BabelNet have confidence 1 if originating from Wikipedia inter-language links and \u2264 1 if obtained by means of statistical machine translation(Navigli and Ponzetto, 2012a).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "Document collections with sizes higher than 10 5 provide high performance(Potthast et al., 2008). Here we used 15k documents from the training set to index the test documents.8 CL-C3G is CL-CNG using character 3-grams, which has proven to be the best length(Mcnamee and Mayfield, 2004).9 In this work, statistically significant results according to a \u03c7 2 test are highlighted in bold.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "Note that training is needed for the k-NN classifier, but not for document comparison.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [
{
"text": "The authors gratefully acknowledge the support of the ERC Starting Grant MultiJEDI No. 259234, EC WIQ-EI IRSES (Grant No. 269180) and MICINN DIANA-Applications (TIN2012-38603-C02-01). Thanks go to Yih et al. for their support and Jim McManus for his comments.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Acknowledgments",
"sec_num": null
}
],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "Learning from multiple partially observed views -an application to multilingual text categorization",
"authors": [
{
"first": "Massih-Reza",
"middle": [],
"last": "Amini",
"suffix": ""
},
{
"first": "Nicolas",
"middle": [],
"last": "Usunier",
"suffix": ""
},
{
"first": "Cyril",
"middle": [],
"last": "Goutte",
"suffix": ""
}
],
"year": 2009,
"venue": "Advances in Neural Information Processing Systems 22 (NIPS 2009)",
"volume": "",
"issue": "",
"pages": "28--36",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Massih-Reza Amini, Nicolas Usunier, and Cyril Goutte. 2009. Learning from multiple partially ob- served views -an application to multilingual text categorization. In Advances in Neural Information Processing Systems 22 (NIPS 2009), pages 28-36.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "Dbpedia -a crystallization point for the web of data",
"authors": [
{
"first": "Christian",
"middle": [],
"last": "Bizer",
"suffix": ""
},
{
"first": "Jens",
"middle": [],
"last": "Lehmann",
"suffix": ""
},
{
"first": "Georgi",
"middle": [],
"last": "Kobilarov",
"suffix": ""
},
{
"first": "S\u00f6ren",
"middle": [],
"last": "Auer",
"suffix": ""
},
{
"first": "Christian",
"middle": [],
"last": "Becker",
"suffix": ""
},
{
"first": "Richard",
"middle": [],
"last": "Cyganiak",
"suffix": ""
},
{
"first": "Sebastian",
"middle": [],
"last": "Hellmann",
"suffix": ""
}
],
"year": 2009,
"venue": "J. Web Sem",
"volume": "7",
"issue": "3",
"pages": "154--165",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Christian Bizer, Jens Lehmann, Georgi Kobilarov, S\u00f6ren Auer, Christian Becker, Richard Cyganiak, and Sebastian Hellmann. 2009. Dbpedia -a crys- tallization point for the web of data. J. Web Sem., 7(3):154-165.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "Explicit versus latent concept models for cross-language information retrieval",
"authors": [
{
"first": "Philipp",
"middle": [],
"last": "Cimiano",
"suffix": ""
},
{
"first": "Antje",
"middle": [],
"last": "Schultz",
"suffix": ""
},
{
"first": "Sergej",
"middle": [],
"last": "Sizov",
"suffix": ""
},
{
"first": "Philipp",
"middle": [],
"last": "Sorg",
"suffix": ""
},
{
"first": "Steffen",
"middle": [],
"last": "Staab",
"suffix": ""
}
],
"year": 2009,
"venue": "Proceedings of the International Joint Conference on Artificial Intelligence (IJCAI)",
"volume": "9",
"issue": "",
"pages": "1513--1518",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Philipp Cimiano, Antje Schultz, Sergej Sizov, Philipp Sorg, and Steffen Staab. 2009. Explicit versus la- tent concept models for cross-language information retrieval. In Proceedings of the International Joint Conference on Artificial Intelligence (IJCAI), vol- ume 9, pages 1513-1518.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "Journal of the American society for information science",
"authors": [
{
"first": "Scott",
"middle": [],
"last": "Deerwester",
"suffix": ""
},
{
"first": "Susan",
"middle": [
"T"
],
"last": "Dumais",
"suffix": ""
},
{
"first": "George",
"middle": [
"W"
],
"last": "Furnas",
"suffix": ""
},
{
"first": "Thomas",
"middle": [
"K"
],
"last": "Landauer",
"suffix": ""
},
{
"first": "Richard",
"middle": [],
"last": "Harshman",
"suffix": ""
}
],
"year": 1990,
"venue": "",
"volume": "41",
"issue": "",
"pages": "391--407",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Scott Deerwester, Susan T. Dumais, George W. Fur- nas, Thomas K. Landauer, and Richard Harshman. 1990. Indexing by latent semantic analysis. Jour- nal of the American society for information science, 41(6):391-407.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "Principal component neural networks",
"authors": [
{
"first": "I",
"middle": [],
"last": "Konstantinos",
"suffix": ""
},
{
"first": "Sun",
"middle": [
"Y"
],
"last": "Diamantaras",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Kung",
"suffix": ""
}
],
"year": 1996,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Konstantinos I. Diamantaras and Sun Y. Kung. 1996. Principal component neural networks. Wiley New York.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Automatic cross-language retrieval using latent semantic indexing",
"authors": [
{
"first": "Susan",
"middle": [
"T"
],
"last": "Dumais",
"suffix": ""
},
{
"first": "Todd",
"middle": [
"A"
],
"last": "Letsche",
"suffix": ""
},
{
"first": "Michael",
"middle": [
"L"
],
"last": "Littman",
"suffix": ""
},
{
"first": "Thomas",
"middle": [
"K"
],
"last": "Landauer",
"suffix": ""
}
],
"year": 1997,
"venue": "Proc. of AAAI Spring Symposium on Cross-language Text and Speech Retrieval",
"volume": "",
"issue": "",
"pages": "18--24",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Susan T. Dumais, Todd A. Letsche, Michael L. Littman, and Thomas K. Landauer. 1997. Auto- matic cross-language retrieval using latent seman- tic indexing. In Proc. of AAAI Spring Symposium on Cross-language Text and Speech Retrieval, pages 18-24.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "WordNet: An electronic lexical database",
"authors": [
{
"first": "Christiane",
"middle": [],
"last": "Fellbaum",
"suffix": ""
}
],
"year": 1998,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Christiane Fellbaum. 1998. WordNet: An electronic lexical database. Bradford Books.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Cross-language plagiarism detection using a multilingual semantic network",
"authors": [
{
"first": "Marc",
"middle": [],
"last": "Franco-Salvador",
"suffix": ""
},
{
"first": "Parth",
"middle": [],
"last": "Gupta",
"suffix": ""
},
{
"first": "Paolo",
"middle": [],
"last": "Rosso",
"suffix": ""
}
],
"year": 2013,
"venue": "Proc. of the 35th European Conference on Information Retrieval (ECIR'13), volume LNCS(7814)",
"volume": "",
"issue": "",
"pages": "710--713",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Marc Franco-Salvador, Parth Gupta, and Paolo Rosso. 2013. Cross-language plagiarism detection using a multilingual semantic network. In Proc. of the 35th European Conference on Information Retrieval (ECIR'13), volume LNCS(7814), pages 710-713. Springer-Verlag.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "Computing semantic relatedness using wikipediabased explicit semantic analysis",
"authors": [
{
"first": "Evgeniy",
"middle": [],
"last": "Gabrilovich",
"suffix": ""
},
{
"first": "Shaul",
"middle": [],
"last": "Markovitch",
"suffix": ""
}
],
"year": 2007,
"venue": "Proc. of the 20th International Joint Conference on Artifical Intelligence (IJCAI)",
"volume": "",
"issue": "",
"pages": "1606--1611",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Evgeniy Gabrilovich and Shaul Markovitch. 2007. Computing semantic relatedness using wikipedia- based explicit semantic analysis. In Proc. of the 20th International Joint Conference on Artifical Intelli- gence (IJCAI), pages 1606-1611.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "An analytical comparison of approaches to personalizing pagerank",
"authors": [
{
"first": "Taher",
"middle": [],
"last": "Haveliwala",
"suffix": ""
},
{
"first": "Sepandar",
"middle": [],
"last": "Kamvar",
"suffix": ""
},
{
"first": "Glen",
"middle": [],
"last": "Jeh",
"suffix": ""
}
],
"year": 2003,
"venue": "",
"volume": "35",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Taher Haveliwala, Sepandar Kamvar, and Glen Jeh. 2003. An analytical comparison of approaches to personalizing pagerank. Technical Report 2003-35, Stanford InfoLab, June.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "Using word dependent transition models in hmm based word alignment for statistical machine translation",
"authors": [
{
"first": "Xiaodong",
"middle": [],
"last": "He",
"suffix": ""
}
],
"year": 2007,
"venue": "Proceedings of the Second Workshop on Statistical Machine Translation",
"volume": "",
"issue": "",
"pages": "80--87",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Xiaodong He. 2007. Using word dependent transition models in hmm based word alignment for statistical machine translation. In Proceedings of the Second Workshop on Statistical Machine Translation, pages 80-87. Association for Computational Linguistics.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "Yago2: A spatially and temporally enhanced knowledge base from wikipedia",
"authors": [
{
"first": "Johannes",
"middle": [],
"last": "Hoffart",
"suffix": ""
},
{
"first": "Fabian",
"middle": [
"M"
],
"last": "Suchanek",
"suffix": ""
},
{
"first": "Klaus",
"middle": [],
"last": "Berberich",
"suffix": ""
},
{
"first": "Gerhard",
"middle": [],
"last": "Weikum",
"suffix": ""
}
],
"year": 2013,
"venue": "Artificial Intelligence",
"volume": "194",
"issue": "",
"pages": "28--61",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Johannes Hoffart, Fabian M. Suchanek, Klaus Berberich, and Gerhard Weikum. 2013. Yago2: A spatially and temporally enhanced knowledge base from wikipedia. Artificial Intelligence, 194:28-61.",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "Collaboratively built semistructured content and Artificial Intelligence: The story so far",
"authors": [
{
"first": "Eduard",
"middle": [
"H"
],
"last": "Hovy",
"suffix": ""
},
{
"first": "Roberto",
"middle": [],
"last": "Navigli",
"suffix": ""
},
{
"first": "Simone",
"middle": [
"Paolo"
],
"last": "Ponzetto",
"suffix": ""
}
],
"year": 2013,
"venue": "Artificial Intelligence",
"volume": "194",
"issue": "",
"pages": "2--27",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Eduard H. Hovy, Roberto Navigli, and Simone Paolo Ponzetto. 2013. Collaboratively built semi- structured content and Artificial Intelligence: The story so far. Artificial Intelligence, 194:2-27.",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "Improving text retrieval for the routing problem using latent semantic indexing",
"authors": [
{
"first": "David",
"middle": [],
"last": "Hull",
"suffix": ""
}
],
"year": 1994,
"venue": "Proceedings of the 17th Annual International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR)",
"volume": "",
"issue": "",
"pages": "282--291",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "David Hull. 1994. Improving text retrieval for the routing problem using latent semantic indexing. In Proceedings of the 17th Annual International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR), pages 282-291. Springer.",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "Similarity coefficients: measures of co-occurrence and association or simply measures of occurrence?",
"authors": [
{
"first": "Donald",
"middle": [
"A"
],
"last": "Jackson",
"suffix": ""
},
{
"first": "Keith",
"middle": [
"M"
],
"last": "Somers",
"suffix": ""
},
{
"first": "Harold",
"middle": [
"H"
],
"last": "Harvey",
"suffix": ""
}
],
"year": 1989,
"venue": "American Naturalist",
"volume": "",
"issue": "",
"pages": "436--453",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Donald A. Jackson, Keith M. Somers, and Harold H. Harvey. 1989. Similarity coefficients: measures of co-occurrence and association or simply measures of occurrence? American Naturalist, pages 436-453.",
"links": null
},
"BIBREF16": {
"ref_id": "b16",
"title": "Principal component analysis",
"authors": [
{
"first": "Ian",
"middle": [
"T"
],
"last": "Jolliffe",
"suffix": ""
}
],
"year": 1986,
"venue": "",
"volume": "487",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Ian T. Jolliffe. 1986. Principal component analysis, volume 487. Springer-Verlag New York.",
"links": null
},
"BIBREF17": {
"ref_id": "b17",
"title": "Computerized cross-language document retrieval using latent semantic indexing",
"authors": [
{
"first": "K",
"middle": [],
"last": "Thomas",
"suffix": ""
},
{
"first": "Michael",
"middle": [
"L"
],
"last": "Landauer",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Littman",
"suffix": ""
}
],
"year": 1994,
"venue": "US Patent",
"volume": "5",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Thomas K. Landauer and Michael L. Littman. 1994. Computerized cross-language document retrieval using latent semantic indexing, April 5. US Patent 5,301,109.",
"links": null
},
"BIBREF18": {
"ref_id": "b18",
"title": "Introduction to Information Retrieval",
"authors": [
{
"first": "Christopher",
"middle": [
"D"
],
"last": "Manning",
"suffix": ""
},
{
"first": "Prabhakar",
"middle": [],
"last": "Raghavan",
"suffix": ""
},
{
"first": "Hinrich",
"middle": [],
"last": "Sch\u00fctze",
"suffix": ""
}
],
"year": 2008,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Christopher D. Manning, Prabhakar Raghavan, and Hinrich Sch\u00fctze. 2008. Introduction to Information Retrieval. Cambridge University Press, New York, NY, USA.",
"links": null
},
"BIBREF20": {
"ref_id": "b20",
"title": "A graph-based approach to word sense alignment",
"authors": [
{
"first": "",
"middle": [],
"last": "Dijkstra-Wsa",
"suffix": ""
}
],
"year": null,
"venue": "Transactions of the Association for Computational Linguistics (TACL)",
"volume": "1",
"issue": "",
"pages": "151--164",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Dijkstra-WSA: A graph-based approach to word sense alignment. Transactions of the Association for Computational Linguistics (TACL), 1:151-164.",
"links": null
},
"BIBREF21": {
"ref_id": "b21",
"title": "Character n-gram tokenization for european language text retrieval",
"authors": [
{
"first": "Paul",
"middle": [],
"last": "Mcnamee",
"suffix": ""
},
{
"first": "James",
"middle": [],
"last": "Mayfield",
"suffix": ""
}
],
"year": 2004,
"venue": "Information Retrieval",
"volume": "7",
"issue": "1-2",
"pages": "73--97",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Paul Mcnamee and James Mayfield. 2004. Charac- ter n-gram tokenization for european language text retrieval. Information Retrieval, 7(1-2):73-97.",
"links": null
},
"BIBREF22": {
"ref_id": "b22",
"title": "Polylingual topic models",
"authors": [
{
"first": "David",
"middle": [],
"last": "Mimno",
"suffix": ""
},
{
"first": "Hanna",
"middle": [
"M"
],
"last": "Wallach",
"suffix": ""
},
{
"first": "Jason",
"middle": [],
"last": "Naradowsky",
"suffix": ""
},
{
"first": "David",
"middle": [
"A"
],
"last": "Smith",
"suffix": ""
},
{
"first": "Andrew",
"middle": [],
"last": "Mccallum",
"suffix": ""
}
],
"year": 2009,
"venue": "Proceedings of the 2009 Conference on Empirical Methods in Natural Language Processing",
"volume": "2",
"issue": "",
"pages": "880--889",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "David Mimno, Hanna M. Wallach, Jason Naradowsky, David A. Smith, and Andrew McCallum. 2009. Polylingual topic models. In Proceedings of the 2009 Conference on Empirical Methods in Natural Language Processing: Volume 2-Volume 2, pages 880-889. Association for Computational Linguis- tics.",
"links": null
},
"BIBREF23": {
"ref_id": "b23",
"title": "Flexible comparison of conceptual graphs",
"authors": [
{
"first": "Manuel",
"middle": [],
"last": "Montes Y G\u00f3mez",
"suffix": ""
},
{
"first": "Alexander",
"middle": [
"F"
],
"last": "Gelbukh",
"suffix": ""
},
{
"first": "Aurelio",
"middle": [],
"last": "L\u00f3pez-L\u00f3pez",
"suffix": ""
},
{
"first": "Ricardo",
"middle": [
"A"
],
"last": "Baeza-Yates",
"suffix": ""
}
],
"year": 2001,
"venue": "Proc. of the 12th International Conference on Database and Expert Systems Applications (DEXA)",
"volume": "",
"issue": "",
"pages": "102--111",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Manuel Montes y G\u00f3mez, Alexander F. Gelbukh, Au- relio L\u00f3pez-L\u00f3pez, and Ricardo A. Baeza-Yates. 2001. Flexible comparison of conceptual graphs. In Proc. of the 12th International Conference on Database and Expert Systems Applications (DEXA), pages 102-111.",
"links": null
},
"BIBREF24": {
"ref_id": "b24",
"title": "Improving machine translation performance by exploiting non-parallel corpora",
"authors": [
{
"first": "Stefan",
"middle": [],
"last": "Dragos",
"suffix": ""
},
{
"first": "Daniel",
"middle": [],
"last": "Munteanu",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Marcu",
"suffix": ""
}
],
"year": 2005,
"venue": "Computational Linguistics",
"volume": "31",
"issue": "4",
"pages": "477--504",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Dragos Stefan Munteanu and Daniel Marcu. 2005. Im- proving machine translation performance by exploit- ing non-parallel corpora. Computational Linguis- tics, 31(4):477-504.",
"links": null
},
"BIBREF25": {
"ref_id": "b25",
"title": "Transforming wikipedia into a large scale multilingual concept network",
"authors": [
{
"first": "Vivi",
"middle": [],
"last": "Nastase",
"suffix": ""
},
{
"first": "Michael",
"middle": [],
"last": "Strube",
"suffix": ""
}
],
"year": 2013,
"venue": "Artificial Intelligence",
"volume": "194",
"issue": "",
"pages": "62--85",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Vivi Nastase and Michael Strube. 2013. Transform- ing wikipedia into a large scale multilingual concept network. Artificial Intelligence, 194:62-85.",
"links": null
},
"BIBREF26": {
"ref_id": "b26",
"title": "An experimental study of graph connectivity for unsupervised word sense disambiguation",
"authors": [
{
"first": "Roberto",
"middle": [],
"last": "Navigli",
"suffix": ""
},
{
"first": "Mirella",
"middle": [],
"last": "Lapata",
"suffix": ""
}
],
"year": 2010,
"venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence",
"volume": "32",
"issue": "4",
"pages": "678--692",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Roberto Navigli and Mirella Lapata. 2010. An ex- perimental study of graph connectivity for unsuper- vised word sense disambiguation. IEEE Transac- tions on Pattern Analysis and Machine Intelligence, 32(4):678-692.",
"links": null
},
"BIBREF27": {
"ref_id": "b27",
"title": "BabelNet: The automatic construction, evaluation and application of a wide-coverage multilingual semantic network",
"authors": [
{
"first": "Roberto",
"middle": [],
"last": "Navigli",
"suffix": ""
},
{
"first": "Simone",
"middle": [
"Paolo"
],
"last": "Ponzetto",
"suffix": ""
}
],
"year": 2012,
"venue": "Artificial Intelligence",
"volume": "193",
"issue": "",
"pages": "217--250",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Roberto Navigli and Simone Paolo Ponzetto. 2012a. BabelNet: The automatic construction, evaluation and application of a wide-coverage multilingual se- mantic network. Artificial Intelligence, 193:217- 250.",
"links": null
},
"BIBREF28": {
"ref_id": "b28",
"title": "BabelRelate! a joint multilingual approach to computing semantic relatedness",
"authors": [
{
"first": "Roberto",
"middle": [],
"last": "Navigli",
"suffix": ""
},
{
"first": "Simone",
"middle": [
"Paolo"
],
"last": "Ponzetto",
"suffix": ""
}
],
"year": 2012,
"venue": "Proceedings of the Twenty-Sixth AAAI Conference on Artificial Intelligence (AAAI-12)",
"volume": "",
"issue": "",
"pages": "108--114",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Roberto Navigli and Simone Paolo Ponzetto. 2012b. BabelRelate! a joint multilingual approach to com- puting semantic relatedness. In Proceedings of the Twenty-Sixth AAAI Conference on Artificial Intelli- gence (AAAI-12), pages 108-114, Toronto, Canada.",
"links": null
},
"BIBREF29": {
"ref_id": "b29",
"title": "The PageRank Citation Ranking: Bringing Order to the Web",
"authors": [
{
"first": "Lawrence",
"middle": [],
"last": "Page",
"suffix": ""
},
{
"first": "Sergey",
"middle": [],
"last": "Brin",
"suffix": ""
},
{
"first": "Rajeev",
"middle": [],
"last": "Motwani",
"suffix": ""
},
{
"first": "Terry",
"middle": [],
"last": "Winograd",
"suffix": ""
}
],
"year": 1998,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Lawrence Page, Sergey Brin, Rajeev Motwani, and Terry Winograd. 1998. The PageRank Citation Ranking: Bringing Order to the Web. Technical re- port, Stanford Digital Library Technologies Project.",
"links": null
},
"BIBREF30": {
"ref_id": "b30",
"title": "Translingual document representations from discriminative projections",
"authors": [
{
"first": "John",
"middle": [
"C"
],
"last": "Platt",
"suffix": ""
},
{
"first": "Kristina",
"middle": [],
"last": "Toutanova",
"suffix": ""
},
{
"first": "Wen-Tau",
"middle": [],
"last": "Yih",
"suffix": ""
}
],
"year": 2010,
"venue": "Proceedings of the 2010 Conference on Empirical Methods in Natural Language Processing",
"volume": "",
"issue": "",
"pages": "251--261",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "John C. Platt, Kristina Toutanova, and Wen-tau Yih. 2010. Translingual document representations from discriminative projections. In Proceedings of the 2010 Conference on Empirical Methods in Natural Language Processing, pages 251-261.",
"links": null
},
"BIBREF31": {
"ref_id": "b31",
"title": "A wikipedia-based multilingual retrieval model",
"authors": [
{
"first": "Martin",
"middle": [],
"last": "Potthast",
"suffix": ""
},
{
"first": "Benno",
"middle": [],
"last": "Stein",
"suffix": ""
},
{
"first": "Maik",
"middle": [],
"last": "Anderka",
"suffix": ""
}
],
"year": 2008,
"venue": "Advances in Information Retrieval",
"volume": "",
"issue": "",
"pages": "522--530",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Martin Potthast, Benno Stein, and Maik Anderka. 2008. A wikipedia-based multilingual retrieval model. In Advances in Information Retrieval, pages 522-530. Springer.",
"links": null
},
"BIBREF32": {
"ref_id": "b32",
"title": "Cross-language plagiarism detection. Language Resources and Evaluation",
"authors": [
{
"first": "Martin",
"middle": [],
"last": "Potthast",
"suffix": ""
},
{
"first": "Alberto",
"middle": [],
"last": "Barr\u00f3n-Cede\u00f1o",
"suffix": ""
},
{
"first": "Benno",
"middle": [],
"last": "Stein",
"suffix": ""
},
{
"first": "Paolo",
"middle": [],
"last": "Rosso",
"suffix": ""
}
],
"year": 2011,
"venue": "",
"volume": "45",
"issue": "",
"pages": "45--62",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Martin Potthast, Alberto Barr\u00f3n-Cede\u00f1o, Benno Stein, and Paolo Rosso. 2011. Cross-language plagia- rism detection. Language Resources and Evalua- tion, 45(1):45-62.",
"links": null
},
"BIBREF33": {
"ref_id": "b33",
"title": "Portage: A phrase-based machine translation system",
"authors": [
{
"first": "Fatiha",
"middle": [],
"last": "Sadat",
"suffix": ""
},
{
"first": "Howard",
"middle": [],
"last": "Johnson",
"suffix": ""
},
{
"first": "Akakpo",
"middle": [],
"last": "Agbago",
"suffix": ""
},
{
"first": "George",
"middle": [],
"last": "Foster",
"suffix": ""
},
{
"first": "Joel",
"middle": [],
"last": "Martin",
"suffix": ""
},
{
"first": "Aaron",
"middle": [],
"last": "Tikuisis",
"suffix": ""
}
],
"year": 2005,
"venue": "Proceedings of the ACL Workshop on Building and Using Parallel Texts",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Fatiha Sadat, Howard Johnson, Akakpo Agbago, George Foster, Joel Martin, and Aaron Tikuisis. 2005. Portage: A phrase-based machine translation system. In Proceedings of the ACL Workshop on Building and Using Parallel Texts, Ann Arbor, USA.",
"links": null
},
"BIBREF34": {
"ref_id": "b34",
"title": "Termweighting approaches in automatic text retrieval",
"authors": [
{
"first": "Gerard",
"middle": [],
"last": "Salton",
"suffix": ""
},
{
"first": "Christopher",
"middle": [],
"last": "Buckley",
"suffix": ""
}
],
"year": 1988,
"venue": "formation processing & management",
"volume": "24",
"issue": "",
"pages": "513--523",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Gerard Salton and Christopher Buckley. 1988. Term- weighting approaches in automatic text retrieval. In- formation processing & management, 24(5):513- 523.",
"links": null
},
"BIBREF35": {
"ref_id": "b35",
"title": "Introduction to Modern Information Retrieval",
"authors": [
{
"first": "Gerard",
"middle": [],
"last": "Salton",
"suffix": ""
},
{
"first": "Michael",
"middle": [
"J"
],
"last": "Mcgill",
"suffix": ""
}
],
"year": 1986,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Gerard Salton and Michael J. McGill. 1986. Intro- duction to Modern Information Retrieval. McGraw- Hill, Inc., New York, NY, USA.",
"links": null
},
"BIBREF36": {
"ref_id": "b36",
"title": "Extended boolean information retrieval",
"authors": [
{
"first": "Gerard",
"middle": [],
"last": "Salton",
"suffix": ""
},
{
"first": "Edward",
"middle": [
"A"
],
"last": "Fox",
"suffix": ""
},
{
"first": "Harry",
"middle": [],
"last": "Wu",
"suffix": ""
}
],
"year": 1983,
"venue": "Communications of the ACM",
"volume": "26",
"issue": "11",
"pages": "1022--1036",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Gerard Salton, Edward A. Fox, and Harry Wu. 1983. Extended boolean information retrieval. Communi- cations of the ACM, 26(11):1022-1036.",
"links": null
},
"BIBREF37": {
"ref_id": "b37",
"title": "Canonical correlation analysis. Encyclopedia of statistics in behavioral science",
"authors": [
{
"first": "Bruce",
"middle": [],
"last": "Thompson",
"suffix": ""
}
],
"year": 2005,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Bruce Thompson. 2005. Canonical correlation analy- sis. Encyclopedia of statistics in behavioral science.",
"links": null
},
"BIBREF38": {
"ref_id": "b38",
"title": "EuroWordNet: A multilingual database of autonomous and language-specific wordnets connected via an inter-lingual index",
"authors": [
{
"first": "Piek",
"middle": [],
"last": "Vossen",
"suffix": ""
}
],
"year": 2004,
"venue": "International Journal of Lexicography",
"volume": "17",
"issue": "2",
"pages": "161--173",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Piek Vossen. 2004. EuroWordNet: A multilin- gual database of autonomous and language-specific wordnets connected via an inter-lingual index. In- ternational Journal of Lexicography, 17(2):161- 173.",
"links": null
},
"BIBREF39": {
"ref_id": "b39",
"title": "Learning discriminative projections for text similarity measures",
"authors": [
{
"first": "Kristina",
"middle": [],
"last": "Wen-Tau Yih",
"suffix": ""
},
{
"first": "John",
"middle": [
"C"
],
"last": "Toutanova",
"suffix": ""
},
{
"first": "Christopher",
"middle": [],
"last": "Platt",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Meek",
"suffix": ""
}
],
"year": 2011,
"venue": "Proceedings of the Fifteenth Conference on Computational Natural Language Learning",
"volume": "",
"issue": "",
"pages": "247--256",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Wen-tau Yih, Kristina Toutanova, John C. Platt, and Christopher Meek. 2011. Learning discriminative projections for text similarity measures. In Proceed- ings of the Fifteenth Conference on Computational Natural Language Learning, pages 247-256.",
"links": null
}
},
"ref_entries": {
"TABREF1": {
"html": null,
"content": "<table/>",
"text": "Test results for comparable document retrieval in Wikipedia. S2Net, OPCA, CosSim E , CCA and CL-LSI are from(Yih et al., 2011).",
"num": null,
"type_str": "table"
},
"TABREF3": {
"html": null,
"content": "<table/>",
"text": "Test results for cross-language text categorization. Full MT, OPCA, CCA, CL-LSI and CosSim",
"num": null,
"type_str": "table"
},
"TABREF4": {
"html": null,
"content": "<table><tr><td>Testing</td><td/><td colspan=\"3\">Training datasets</td><td/></tr><tr><td>datasets</td><td>DE</td><td>EN</td><td>ES</td><td>FR</td><td>IT</td></tr><tr><td>DE</td><td>0.8053 0</td><td/><td/><td/><td/></tr></table>",
"text": ".6872 0.5373 0.6417 0.5920 EN 0.5827 0.8463 0.5540 0.6530 0.5820 ES 0.5883 0.6153 0.8707 0.6237 0.7010 FR 0.6867 0.7103 0.6667 0.8227 0.6887 IT 0.5973 0.5487 0.6263 0.5973 0.8317",
"num": null,
"type_str": "table"
},
"TABREF5": {
"html": null,
"content": "<table/>",
"text": "KBSim accuracy in a multilingual setup.",
"num": null,
"type_str": "table"
}
}
}
}