| { |
| "paper_id": "Q14-1034", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:11:44.000595Z" |
| }, |
| "title": "Extracting Lexically Divergent Paraphrases from Twitter", |
| "authors": [ |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Pennsylvania", |
| "location": { |
| "settlement": "Philadelphia", |
| "region": "PA", |
| "country": "USA" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [], |
| "last": "Ritter", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The Ohio State University", |
| "location": { |
| "settlement": "Columbus", |
| "region": "OH", |
| "country": "USA" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Callison-Burch", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Pennsylvania", |
| "location": { |
| "settlement": "Philadelphia", |
| "region": "PA", |
| "country": "USA" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "William", |
| "middle": [ |
| "B" |
| ], |
| "last": "Dolan", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Microsoft Research", |
| "location": { |
| "settlement": "Redmond", |
| "region": "WA", |
| "country": "USA" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Yangfeng", |
| "middle": [], |
| "last": "Ji", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Georgia Institute of Technology", |
| "location": { |
| "settlement": "Atlanta", |
| "region": "GA", |
| "country": "USA" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "We present MULTIP (Multi-instance Learning Paraphrase Model), a new model suited to identify paraphrases within the short messages on Twitter. We jointly model paraphrase relations between word and sentence pairs and assume only sentence-level annotations during learning. Using this principled latent variable model alone, we achieve the performance competitive with a state-of-the-art method which combines a latent space model with a feature-based supervised classifier. Our model also captures lexically divergent paraphrases that differ from yet complement previous methods; combining our model with previous work significantly outperforms the state-of-the-art. In addition, we present a novel annotation methodology that has allowed us to crowdsource a paraphrase corpus from Twitter. We make this new dataset available to the research community.", |
| "pdf_parse": { |
| "paper_id": "Q14-1034", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "We present MULTIP (Multi-instance Learning Paraphrase Model), a new model suited to identify paraphrases within the short messages on Twitter. We jointly model paraphrase relations between word and sentence pairs and assume only sentence-level annotations during learning. Using this principled latent variable model alone, we achieve the performance competitive with a state-of-the-art method which combines a latent space model with a feature-based supervised classifier. Our model also captures lexically divergent paraphrases that differ from yet complement previous methods; combining our model with previous work significantly outperforms the state-of-the-art. In addition, we present a novel annotation methodology that has allowed us to crowdsource a paraphrase corpus from Twitter. We make this new dataset available to the research community.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Paraphrases are alternative linguistic expressions of the same or similar meaning (Bhagat and Hovy, 2013) . Twitter engages millions of users, who naturally talk about the same topics simultaneously and frequently convey similar meaning using diverse linguistic expressions. The unique characteristics of this user-generated text presents new challenges and opportunities for paraphrase research (Xu et al., 2013b; Wang et al., 2013) . For many applications, like automatic summarization, first story detection (Petrovi\u0107 et al., 2012) and search (Zanzotto et al., 2011) , it is crucial to resolve redundancy in tweets (e.g. oscar nom'd doc \u2194 Oscar-nominated documentary).", |
| "cite_spans": [ |
| { |
| "start": 82, |
| "end": 105, |
| "text": "(Bhagat and Hovy, 2013)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 396, |
| "end": 414, |
| "text": "(Xu et al., 2013b;", |
| "ref_id": "BIBREF53" |
| }, |
| { |
| "start": 415, |
| "end": 433, |
| "text": "Wang et al., 2013)", |
| "ref_id": "BIBREF50" |
| }, |
| { |
| "start": 511, |
| "end": 534, |
| "text": "(Petrovi\u0107 et al., 2012)", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 546, |
| "end": 569, |
| "text": "(Zanzotto et al., 2011)", |
| "ref_id": "BIBREF57" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we investigate the task of determining whether two tweets are paraphrases. Previous work has exploited a pair of shared named entities to locate semantically equivalent patterns from related news articles (Shinyama et al., 2002; Sekine, 2005; Zhang and Weld, 2013) . But short sentences in Twitter do not often mention two named entities (Ritter et al., 2012) and require nontrivial generalization from named entities to other words. For example, consider the following two sentences about basketball player Brook Lopez from Twitter:", |
| "cite_spans": [ |
| { |
| "start": 220, |
| "end": 243, |
| "text": "(Shinyama et al., 2002;", |
| "ref_id": "BIBREF44" |
| }, |
| { |
| "start": 244, |
| "end": 257, |
| "text": "Sekine, 2005;", |
| "ref_id": "BIBREF43" |
| }, |
| { |
| "start": 258, |
| "end": 279, |
| "text": "Zhang and Weld, 2013)", |
| "ref_id": "BIBREF60" |
| }, |
| { |
| "start": 353, |
| "end": 374, |
| "text": "(Ritter et al., 2012)", |
| "ref_id": "BIBREF40" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 That boy Brook Lopez with a deep 3 \u2022 brook lopez hit a 3 and i missed it Although these sentences do not have many words in common, the identical word \"3\" is a strong indicator that the two sentences are paraphrases.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We therefore propose a novel joint word-sentence approach, incorporating a multi-instance learning assumption (Dietterich et al., 1997) that two sentences under the same topic (we highlight topics in bold) are paraphrases if they contain at least one word pair (we call it an anchor and highlight with underscores; the words in the anchor pair need not be identical) that is indicative of sentential paraphrase. This at-least-one-anchor assumption might be ineffective for long or randomly paired sentences, but holds up better for short sentences that are temporally and topically related on Twitter. Moreover, our model design (see Figure 1 ) allows exploitation of arbitrary features and linguistic resources, such as part-of-speech features and a normalization lex- Figure 1 : (a) a plate representation of the MULTIP model (b) an example instantiation of MULTIP for the pair of sentences \"Manti bout to be the next Junior Seau\" and \"Teo is the little new Junior Seau\", in which a new American football player Manti Te'o was being compared to a famous former player Junior Seau. Only 4 out of the total 6 \u00d7 5 word pairs, z 1z 30 , are shown here.", |
| "cite_spans": [ |
| { |
| "start": 110, |
| "end": 135, |
| "text": "(Dietterich et al., 1997)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 634, |
| "end": 642, |
| "text": "Figure 1", |
| "ref_id": null |
| }, |
| { |
| "start": 770, |
| "end": 778, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "icon, to discriminatively determine word pairs as paraphrastic anchors or not.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Our graphical model is a major departure from popular surface-or latent-similarity methods (Wan et al., 2006; Guo and Diab, 2012; Ji and Eisenstein, 2013, and others) . Our approach to extract paraphrases from Twitter is general and can be combined with various topic detecting solutions. As a demonstration, we use Twitter's own trending topic service 1 to collect data and conduct experiments. While having a principled and extensible design, our model alone achieves performance on par with a state-of-the-art ensemble approach that involves both latent semantic modeling and supervised classification. The proposed model also captures radically different paraphrases from previous approaches; a combined system shows significant improvement over the state-of-the-art.", |
| "cite_spans": [ |
| { |
| "start": 91, |
| "end": 109, |
| "text": "(Wan et al., 2006;", |
| "ref_id": "BIBREF49" |
| }, |
| { |
| "start": 110, |
| "end": 129, |
| "text": "Guo and Diab, 2012;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 130, |
| "end": 166, |
| "text": "Ji and Eisenstein, 2013, and others)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "This paper makes the following contributions:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "1) We present a novel latent variable model for paraphrase identification, that specifically accommodates the very short context and divergent wording in Twitter data. We experimentally compare several representative approaches and show that our proposed method 1 More information about Twitter's trends: https://support.twitter.com/articles/ 101125-faqs-about-twitter-s-trends yields state-of-the-art results and identifies paraphrases that are complementary to previous methods.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We develop an efficient crowdsourcing method and construct a Twitter Paraphrase Corpus of about 18,000 sentence pairs, as a first common testbed for the development and comparison of paraphrase identification and semantic similarity systems. We make this dataset available to the research community. 2", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "2)", |
| "sec_num": null |
| }, |
| { |
| "text": "We present a new latent variable model that jointly captures paraphrase relations between sentence pairs and word pairs. It is very different from previous approaches in that its primary design goal and motivation is targeted towards short, lexically diverse text on the social web.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Joint Word-Sentence Paraphrase Model", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Much previous work on paraphrase identification has been developed and evaluated on a specific benchmark dataset, the Microsoft Research Paraphrase Corpus (Dolan et al., 2004) , which is de-", |
| "cite_spans": [ |
| { |
| "start": 155, |
| "end": 175, |
| "text": "(Dolan et al., 2004)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "At-least-one-anchor Assumption", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "\u2022 Revenue in the first quarter of the year dropped 15 percent from the same period a year earlier.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "News", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 With the scandal hanging over Stewart's company, revenue in the first quarter of the year dropped 15 percent from the same period a year earlier. (Dolan and Brockett, 2005) \u2022 The Senate Select Committee on Intelligence is preparing a blistering report on prewar intelligence on Iraq.", |
| "cite_spans": [ |
| { |
| "start": 148, |
| "end": 174, |
| "text": "(Dolan and Brockett, 2005)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "News", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 American intelligence leading up to the war on Iraq will be criticized by a powerful US Congressional committee due to report soon, officials said today.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "News", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 rived from news articles. Twitter data is very different, as shown in Table 1 . We observe that among tweets posted around the same time about the same topic (e.g. a named entity), sentential paraphrases are short and can often be \"anchored\" by lexical paraphrases. This intuition leads to the at-least-oneanchor assumption we stated in the introduction. The anchor could be a word the two sentences share in common. It also could be a pair of different words. For example, the word pair \"next new\" in two tweets about a new player Manti Te'o to a famous former American football player Junior Seau:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 72, |
| "end": 79, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "News", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 Manti bout to be the next Junior Seau \u2022 Teo is the little new Junior Seau Further note that not every word pair of similar meaning indicates sentence-level paraphrase. For example, the word \"3\", shared by two sentences about movie \"Iron Man\" that refers to the 3rd sequel of the movie, is not a paraphrastic anchor:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "News", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 Iron Man 3 was brilliant fun \u2022 Iron Man 3", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "News", |
| "sec_num": null |
| }, |
| { |
| "text": "Therefore, we use a discriminative model at the word-level to incorporate various features, such as part-of-speech features, to determine how probable a word pair is a paraphrase anchor.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "tonight see what this is like", |
| "sec_num": null |
| }, |
| { |
| "text": "The at-least-one-anchor assumption naturally leads to a multi-instance learning problem (Dietterich et al., 1997) , where the learner only observes labels on bags of instances (i.e. sentence-level paraphrases in this case) instead of labels on each individual instance (i.e. word pair). We formally define an undirected graphical model of multi-instance learning for paraphrase identification -MULTIP. Figure 1 shows the proposed model in plate form and gives an example instantiation. The model has two layers, which allows joint reasoning between sentence-level and word-level components.", |
| "cite_spans": [ |
| { |
| "start": 88, |
| "end": 113, |
| "text": "(Dietterich et al., 1997)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 402, |
| "end": 410, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Multi-instance Learning Paraphrase Model (MULTIP)", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "For each pair of sentences", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-instance Learning Paraphrase Model (MULTIP)", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "s i = (s i 1 , s i 2 )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-instance Learning Paraphrase Model (MULTIP)", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": ", there is an aggregate binary variable y i that represents whether they are paraphrases, and which is observed in the labeled training data. Let W (s i k ) be the set of words in the sentence s i k , excluding the topic names. For each word pair", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-instance Learning Paraphrase Model (MULTIP)", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "w j = (w j 1 , w j 2 ) \u2208 W (s i 1 ) \u00d7 W (s i 2 )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-instance Learning Paraphrase Model (MULTIP)", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": ", there exists a latent variable z j which denotes whether the word pair is a paraphrase anchor. In total there are m = |W (s i 1 )| \u00d7 |W (s i 2 )| word pairs, and thus z i = z 1 , z 2 , ..., z j , ..., z m . Our at-least-one-anchor assumption is realized by a deterministic-or function; that is, if there exists at least one j such that z j = 1, then the sentence pair is a paraphrase.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-instance Learning Paraphrase Model (MULTIP)", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Our conditional paraphrase identification model is defined as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-instance Learning Paraphrase Model (MULTIP)", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "P (z i , y i |w i ; \u03b8) = m j=1 \u03c6(z j , w j ; \u03b8) \u00d7 \u03c3(z i , y i ) = m j=1 exp(\u03b8 \u2022 f (z j , w j )) \u00d7 \u03c3(z i , y i ) (1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-instance Learning Paraphrase Model (MULTIP)", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "where f (z j , w j ) is a vector of features extracted for the word pair w j , \u03b8 is the parameter vector, and \u03c3 is the factor that corresponds to the deterministic-or constraint:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-instance Learning Paraphrase Model (MULTIP)", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "\u03c3(z i , y i ) = \uf8f1 \uf8f4 \uf8f2 \uf8f4 \uf8f3 1 if y i = true \u2227 \u2203j : z j = 1 1 if y i = f alse \u2227 \u2200j : z j = 0 0 otherwise (2) 2.3 Learning", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-instance Learning Paraphrase Model (MULTIP)", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "To learn the parameters of the word-level paraphrase anchor classifier, \u03b8, we maximize likelihood over the sentence-level annotations in our paraphrase corpus:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-instance Learning Paraphrase Model (MULTIP)", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u03b8 * = arg max \u03b8 P (y|w; \u03b8) = arg max \u03b8 i z i P (z i , y i |w i ; \u03b8)", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Multi-instance Learning Paraphrase Model (MULTIP)", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "An iterative gradient-ascent approach is used to estimate \u03b8 using perceptron-style additive updates (Collins, 2002; Liang et al., 2006; Zettlemoyer and Collins, 2007; Hoffmann et al., 2011) . We define an update based on the gradient of the conditional log likelihood using Viterbi approximation, as follows:", |
| "cite_spans": [ |
| { |
| "start": 100, |
| "end": 115, |
| "text": "(Collins, 2002;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 116, |
| "end": 135, |
| "text": "Liang et al., 2006;", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 136, |
| "end": 166, |
| "text": "Zettlemoyer and Collins, 2007;", |
| "ref_id": "BIBREF58" |
| }, |
| { |
| "start": 167, |
| "end": 189, |
| "text": "Hoffmann et al., 2011)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-instance Learning Paraphrase Model (MULTIP)", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u2202 log P (y|w; \u03b8) \u2202\u03b8 = E P (z|w,y;\u03b8) ( i f (z i , w i )) \u2212 E P (z,y|w;\u03b8) ( i f (z i , w i )) \u2248 i f (z * i , w i ) \u2212 i f (z i , w i )", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "Multi-instance Learning Paraphrase Model (MULTIP)", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "where we define the feature sum for each sentence", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-instance Learning Paraphrase Model (MULTIP)", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "f (z i , w i ) = j f (z j , w j )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-instance Learning Paraphrase Model (MULTIP)", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "over all word pairs. These two above expectations are approximated by solving two simple inference problems as maximizations:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-instance Learning Paraphrase Model (MULTIP)", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "z * = arg max z P (z|w, y; \u03b8) y , z = arg max y,z P (z, y|w; \u03b8)", |
| "eq_num": "(5)" |
| } |
| ], |
| "section": "Multi-instance Learning Paraphrase Model (MULTIP)", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Input: a training set {(s i , y i )|i = 1...n}, where i is an index corresponding to a particular sentence pair s i , and y i is the training label.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-instance Learning Paraphrase Model (MULTIP)", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "1: initialize parameter vector \u03b8 \u2190 0 2: for i \u2190 1 to n do 3:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-instance Learning Paraphrase Model (MULTIP)", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "extract all possible word pairs w i = w 1 , w 2 , ..., w m and their features from the sentence pair s i 4: end for 5: for l \u2190 1 to maximum iterations do 6:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-instance Learning Paraphrase Model (MULTIP)", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "for i \u2190 1 to n do 7: (y i , z i ) \u2190 arg max yi,zi P (z i , y i |w i ; \u03b8) 8: if y i = y i then 9: z * i \u2190 arg max zi P (z i |w i , y i ; \u03b8)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-instance Learning Paraphrase Model (MULTIP)", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "10:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-instance Learning Paraphrase Model (MULTIP)", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "\u03b8 \u2190 \u03b8 + f (z * i , w i ) \u2212 f (z i , w i ) 11: end if 12:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-instance Learning Paraphrase Model (MULTIP)", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "end for 13: end for 14: return model parameters \u03b8 Computing both z and z * are rather straightforward under the structure of our model and can be solved in time linear in the number of word pairs. The dependencies between z and y are defined as deterministic-or factors \u03c3(z i , y i ), which when satisfied do not affect the overall probability of the solution. Each sentence pair is independent conditioned on the parameters. For z , it is sufficient to independently compute the most likely assignment z i for each word pair, ignoring the deterministic dependencies. y i is then set by aggregating all z i through the deterministic-or operation. Similarly, we can find the exact solution for z * , the most likely assignment that respects the sentencelevel training label y. For a positive training instance, we simply find its highest scored word pair w \u03c4 by the word-level classifier, then set z * \u03c4 = 1 and z * j = arg max x\u22080,1 \u03c6(x, w j ; \u03b8) for all j = \u03c4 ; for a negative example, we set z * i = 0. The time complexity of both inferences for one sentence pair is O(|W (s)| 2 ), where |W (s)| 2 is the number of word pairs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-instance Learning Paraphrase Model (MULTIP)", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "In practice, we use online learning instead of optimizing the full objective. The detailed learning algorithm is presented in Figure 2 . Following Hoffmann et al. 2011, we use 50 iterations in the experiments.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 126, |
| "end": 134, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Multi-instance Learning Paraphrase Model (MULTIP)", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "At the word-level, our discriminative model allows use of arbitrary features that are similar to those in monolingual word alignment models (MacCartney et al., 2008; Thadani and McKeown, 2011; Yao et al., 2013a,b) . But unlike discriminative monolingual word alignment, we only use sentence-level training labels instead of word-level alignment annotation. For every word pair, we extract the following features:", |
| "cite_spans": [ |
| { |
| "start": 140, |
| "end": 165, |
| "text": "(MacCartney et al., 2008;", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 166, |
| "end": 192, |
| "text": "Thadani and McKeown, 2011;", |
| "ref_id": "BIBREF46" |
| }, |
| { |
| "start": 193, |
| "end": 213, |
| "text": "Yao et al., 2013a,b)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Design", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "String Features that indicate whether the two words, their stemmed forms and their normalized forms are the same, similar or dissimilar. We used the Morpha stemmer (Minnen et al., 2001 ), 3 Jaro-Winkler string similarity (Winkler, 1999) and the Twitter normalization lexicon by Han et al. (2012) .", |
| "cite_spans": [ |
| { |
| "start": 164, |
| "end": 184, |
| "text": "(Minnen et al., 2001", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 221, |
| "end": 236, |
| "text": "(Winkler, 1999)", |
| "ref_id": "BIBREF51" |
| }, |
| { |
| "start": 278, |
| "end": 295, |
| "text": "Han et al. (2012)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Design", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "POS Features that are based on the part-of-speech tags of the two words in the pair, specifying whether the two words have same or different POS tags and what the specific tags are. We use the Twitter Part-Of-Speech tagger developed by Derczynski et al. (2013) . We add new fine-grained tags for variations of the eight words: \"a\", \"be\", \"do\", \"have\", \"get\", \"go\", \"follow\" and \"please\". For example, we use a tag HA for words \"have\", \"has\" and \"had\".", |
| "cite_spans": [ |
| { |
| "start": 236, |
| "end": 260, |
| "text": "Derczynski et al. (2013)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Design", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "Topical Features that relate to the strength of a word's association to the topic. This feature identifies the popular words in each topic, e.g. \"3\" in tweets about basketball game, \"RIP\" in tweets about a celebrity's death. We use G 2 log-likelihoodratio statistic, which has been frequently used in NLP, as a measure of word associations (Dunning, 1993; Moore, 2004) . The significant scores are computed for each trend on an average of about 1500 sentences and converted to binary features for every word pair, indicating whether the two words are both significant or not.", |
| "cite_spans": [ |
| { |
| "start": 340, |
| "end": 355, |
| "text": "(Dunning, 1993;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 356, |
| "end": 368, |
| "text": "Moore, 2004)", |
| "ref_id": "BIBREF35" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Design", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "Our topical features are novel and were not used in previous work. Following Riedel et al. (2010) and Hoffmann et al. (2011) , we also incorporate conjunction features into our system for better accuracy, namely Word+POS, Word+Topical and Word+POS+Topical features.", |
| "cite_spans": [ |
| { |
| "start": 77, |
| "end": 97, |
| "text": "Riedel et al. (2010)", |
| "ref_id": "BIBREF39" |
| }, |
| { |
| "start": 102, |
| "end": 124, |
| "text": "Hoffmann et al. (2011)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Design", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "3 https://github.com/knowitall/morpha 3 Experiments", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Design", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "It is nontrivial to gather a gold-standard dataset of naturally occurring paraphrases and nonparaphrases efficiently from Twitter, since this requires pairwise comparison of tweets and faces a very large search space. To make this annotation task tractable, we design a novel and efficient crowdsourcing method using Amazon Mechanical Turk. Our entire data collection process is detailed in Section \u00a74, with several experiments that demonstrate annotation quality and efficiency.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "In total, we constructed a Twitter Paraphrase Corpus of 18,762 sentence pairs and 19,946 unique sentences. The training and development set consists of 17,790 sentence pairs posted between April 24th and May 3rd, 2014 from 500+ trending topics (excluding hashtags). Our paraphrase model and data collection approach is general and can be combined with various Twitter topic detecting solutions (Diao et al., 2012; Ritter et al., 2012) . As a demonstration, we use Twitter's own trends service since it is easily available. Twitter trending topics are determined by an unpublished algorithm, which finds words, phrases and hashtags that have had a sharp increase in popularity, as opposed to overall volume. We use case-insensitive exact matching to locate topic names in the sentences.", |
| "cite_spans": [ |
| { |
| "start": 394, |
| "end": 413, |
| "text": "(Diao et al., 2012;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 414, |
| "end": 434, |
| "text": "Ritter et al., 2012)", |
| "ref_id": "BIBREF40" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Each sentence pair was annotated by 5 different crowdsourcing workers. For the test set, we obtained both crowdsourced and expert labels on 972 sentence pairs from 20 randomly sampled Twitter trending topics between May 13th and June 10th. Our dataset is more realistic and balanced, containing 79% non-paraphrases vs. 34% in the benchmark Microsoft Paraphrase Corpus of news data. As noted in (Das and Smith, 2009) , the lack of natural nonparaphrases in the MSR corpus creates bias towards certain models.", |
| "cite_spans": [ |
| { |
| "start": 394, |
| "end": 415, |
| "text": "(Das and Smith, 2009)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We use four baselines to compare with our proposed approach for the sentential paraphrase identification task. For the first baseline, we choose a supervised logistic regression (LR) baseline used by Das and Smith (2009) . It uses simple n-gram (also in stemmed form) overlapping features but shows very Das and Smith (2009) .", |
| "cite_spans": [ |
| { |
| "start": 200, |
| "end": 220, |
| "text": "Das and Smith (2009)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 304, |
| "end": 324, |
| "text": "Das and Smith (2009)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "competitive performance on the MSR corpus.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The second baseline is a state-of-the-art unsupervised method, Weighted Textual Matrix Factorization (WTMF), 4 which is specially developed for short sentences by modeling the semantic space of both words that are present in and absent from the sentences (Guo and Diab, 2012) . The original model was learned from WordNet (Fellbaum, 2010) , OntoNotes (Hovy et al., 2006) , Wiktionary, the Brown corpus (Francis and Kucera, 1979) . We enhance the model with 1.6 million sentences from Twitter as suggested by Guo et al. (2013) .", |
| "cite_spans": [ |
| { |
| "start": 255, |
| "end": 275, |
| "text": "(Guo and Diab, 2012)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 322, |
| "end": 338, |
| "text": "(Fellbaum, 2010)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 351, |
| "end": 370, |
| "text": "(Hovy et al., 2006)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 402, |
| "end": 428, |
| "text": "(Francis and Kucera, 1979)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 508, |
| "end": 525, |
| "text": "Guo et al. (2013)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Ji and Eisenstein (2013) presented a state-ofthe-art ensemble system, which we call LEXDIS-CRIM. 5 It directly combines both discriminativelytuned latent features and surface lexical features into a SVM classifier. Specifically, the latent representation of a pair of sentences v 1 and v 2 is converted into a feature vector,", |
| "cite_spans": [ |
| { |
| "start": 97, |
| "end": 98, |
| "text": "5", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "[ v 1 + v 2 , | v 1 \u2212 v 2 |], by concatenating the element-wise sum v 1 + v 2 and absolute different | v 1 \u2212 v 2 |.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We also introduce a new baseline, LEXLATENT, which is a simplified version of LEXDISCRIM and easy to reproduce. It uses the same method to combine latent features and surface features, but combines the open-sourced WTMF latent space model and the logistic regression model from above instead. It achieves similar performance as LEXDIS-CRIM on our dataset (Table 2) . 4 The source code and data for WTMF is available at: http://www.cs.columbia.edu/\u02dcweiwei/code. html 5 The parsing feature was removed because it was not helpful on our Twitter dataset.", |
| "cite_spans": [ |
| { |
| "start": 367, |
| "end": 368, |
| "text": "4", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 355, |
| "end": 364, |
| "text": "(Table 2)", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "For evaluation of different systems, we compute precision-recall curves and report the highest F1 measure of any point on the curve, on the test dataset of 972 sentence pairs against the expert labels. Table 2 shows the performance of different systems. Our proposed MULTIP, a principled latent variable model alone, achieves competitive results with the state-of-the-art system that combines discriminative training and latent semantics.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System Performance", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "In Table 2 , we also show the agreement levels of labels derived from 5 non-expert annotations on Mechanical Turk, which can be considered as an upperbound for automatic paraphrase recognition task performed on this data set. The annotation quality of our corpus is surprisingly good given the fact that the definition of paraphrase is rather inexact (Bhagat and Hovy, 2013) ; the inter-rater agreement between expert annotators on news data is only 0.83 as reported by Dolan et al. (2004) .", |
| "cite_spans": [ |
| { |
| "start": 351, |
| "end": 374, |
| "text": "(Bhagat and Hovy, 2013)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 470, |
| "end": 489, |
| "text": "Dolan et al. (2004)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 3, |
| "end": 10, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "System Performance", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Prec and POS features are essential for system performance, while topical features are helpful but not as crucial. Figure 3 presents precision-recall curves and shows the sensitivity and specificity of each model in comparison. In the first half of the curve (recall < 0.5), MULTIP model makes bolder and less accurate decisions than LEXLATENT. However, the curve for MULTIP model is more flat and shows con-sistently better precision at the second half (recall > 0.5) as well as a higher maximum F1 score. This result reflects our design concept of MULTIP, which is intended to pick up sentential paraphrases with more divergent wordings aggressively. LEXLATENT, as a combined system, considers sentence features in both surface and latent space and is more conservative. Table 4 further illustrates this difference with some example system outputs.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 115, |
| "end": 123, |
| "text": "Figure 3", |
| "ref_id": "FIGREF2" |
| }, |
| { |
| "start": 773, |
| "end": 780, |
| "text": "Table 4", |
| "ref_id": "TABREF6" |
| } |
| ], |
| "eq_spans": [], |
| "section": "F1", |
| "sec_num": null |
| }, |
| { |
| "text": "Our MULTIP model and previous similarity-based approaches have complementary strengths, so we experiment with combining MULTIP (P m ) and LEXLATENT (P l ) through a product of experts (Hinton, 2002) :", |
| "cite_spans": [ |
| { |
| "start": 184, |
| "end": 198, |
| "text": "(Hinton, 2002)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Product of Experts (MULTIP-PE)", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P (y|s 1 , s 2 ) = P m (y|s 1 , s 2 ) \u00d7 P l (y|s 1 , s 2 ) y P m (y|s 1 , s 2 ) \u00d7 P l (y|s 1 , s 2 )", |
| "eq_num": "(6)" |
| } |
| ], |
| "section": "Product of Experts (MULTIP-PE)", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "The resulting system MULTIP-PE provides consistently better precision and recall over the LEXLATENT model, as shown on the right in Figure 3 . The MULTIP-PE system outperforms LEXLATENT significantly according to a paired ttest with \u03c1 less than 0.05. Our proposed MUL-TIP takes advantage of Twitter's specific properties and provides complementary information to previous approaches. Previously, Das and Smith (2009) has also used a product of experts to combine a lexical and a syntax-based model together.", |
| "cite_spans": [ |
| { |
| "start": 396, |
| "end": 416, |
| "text": "Das and Smith (2009)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 132, |
| "end": 140, |
| "text": "Figure 3", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Product of Experts (MULTIP-PE)", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "We now turn to describing our data collection and annotation methodology. Our goal is to construct a high quality dataset that contains representative examples of paraphrases and non-paraphrases in Twitter. Since Twitter users are free to talk about anything regarding any topic, a random pair of sentences about the same topic has a low chance (less than 8%) of expressing the same meaning. This causes two problems: a) it is expensive to obtain paraphrases via manual annotation; b) non-expert annotators tend to loosen the criteria and are more likely to make false positive errors. To address these challenges, we design a simple annotation task and introduce two selection mechanisms to select sentences which are more likely to be paraphrases, while preserving diversity and representativeness.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Constructing Twitter Paraphrase Corpus", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We crawl Twitter's trending topics and their associated tweets using public APIs. 6 According to Twitter, trends are determined by an algorithm which 6 More information about Twitter's APIs: https://dev. twitter.com/docs/api/1.1/overview Figure 4: A heat-map showing overlap between expert and crowdsourcing annotation. The intensity along the diagonal indicates good reliability of crowdsourcing workers for this particular task; and the shift above the diagonal reflects the difference between the two annotation schemas. For crowdsourcing (turk), the numbers indicate how many annotators out of 5 picked the sentence pair as paraphrases; 0,1 are considered non-paraphrases; 3,4,5 are paraphrases. For expert annotation, all 0,1,2 are non-paraphrases; 4,5 are paraphrases. Mediumscored cases are discarded in training and testing in our experiments.", |
| "cite_spans": [ |
| { |
| "start": 150, |
| "end": 151, |
| "text": "6", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Raw Data from Twitter", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "identifies topics that are immediately popular, rather than those that have been popular for longer periods of time or which trend on a daily basis. We tokenize and split each tweet into sentences. 7", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Raw Data from Twitter", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We show the annotator an original sentence, then ask them to pick sentences with the same meaning from 10 candidate sentences. The original and candidate sentences are randomly sampled from the same topic. For each such 1 vs. 10 question, we obtain binary judgements from 5 different annotators, paying each annotator $0.02 per question. On average, each question takes one annotator about 30 \u223c 45 seconds to answer.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task Design on Mechanical Turk", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We remove problematic annotators by checking their Cohen's Kappa agreement (Artstein and ", |
| "cite_spans": [ |
| { |
| "start": 75, |
| "end": 88, |
| "text": "(Artstein and", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation Quality", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Trending Topics Figure 5 : The proportion of paraphrases (percentage of positive votes from annotators) vary greatly across different topics. Automatic filtering in Section 4.4 roughly doubles the paraphrase yield. sio, 2008) with other annotators. We also compute inter-annotator agreement with an expert annotator on 971 sentence pairs. In the expert annotation, we adopt a 5-point Likert scale to measure the degree of semantic similarity between sentences, which is defined by Agirre et al. (2012) as follows:", |
| "cite_spans": [ |
| { |
| "start": 481, |
| "end": 501, |
| "text": "Agirre et al. (2012)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 16, |
| "end": 24, |
| "text": "Figure 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Percentage of Positive Judgements", |
| "sec_num": null |
| }, |
| { |
| "text": "5: Completely equivalent, as they mean the same thing; 4: Mostly equivalent, but some unimportant details differ; 3: Roughly equivalent, but some important information differs/missing. 2: Not equivalent, but share some details; 1: Not equivalent, but are on the same topic; 0: On different topics.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Percentage of Positive Judgements", |
| "sec_num": null |
| }, |
| { |
| "text": "Although the two scales of expert and crowdsourcing annotation are defined differently, their Pearson correlation coefficient reaches 0.735 (twotailed significance 0.001). Figure 4 shows a heatmap representing the detailed overlap between the two annotations. It suggests that the graded similarity annotation task could be reduced to a binary choice in a crowdsourcing setup.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 172, |
| "end": 180, |
| "text": "Figure 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Percentage of Positive Judgements", |
| "sec_num": null |
| }, |
| { |
| "text": "We filter the sentences within each topic to select more probable paraphrases for annotation. Our method is inspired by a typical problem in extractive summarization, that the salient sentences are likely redundant (paraphrases) and need to be removed in the output summaries. We employ the scoring method used in SumBasic (Nenkova and Vanderwende, 2005; Vanderwende et al., 2007) , a simple but powerful summarization system, to find salient sentences. For each topic, we compute the probability of each word P (w i ) by simply dividing its frequency by the total number of all words in all sentences. Each sentence s is scored as the average of the probabilities of the words in it, i.e.", |
| "cite_spans": [ |
| { |
| "start": 323, |
| "end": 354, |
| "text": "(Nenkova and Vanderwende, 2005;", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 355, |
| "end": 380, |
| "text": "Vanderwende et al., 2007)", |
| "ref_id": "BIBREF48" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automatic Summarization Inspired Sentence Filtering", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "Salience(s) = w i \u2208s P (w i ) |{w i |w i \u2208 s}|", |
| "eq_num": "(7)" |
| } |
| ], |
| "section": "Automatic Summarization Inspired Sentence Filtering", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "We then rank the sentences and pick the original sentence randomly from top 10% salient sentences and candidate sentences from top 50% to present to the annotators.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automatic Summarization Inspired Sentence Filtering", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "In a trial experiment of 20 topics, the filtering technique double the yield of paraphrases from 152 to 329 out of 2000 sentence pairs over na\u00efve random sampling ( Figure 5 and Figure 6 ). We also use PINC (Chen and Dolan, 2011) to measure the quality of paraphrases collected (Figure 7) . PINC was designed to measure n-gram dissimilarity between two sentences, and in essence it is the inverse of BLEU. In general, the cases with high PINC scores include more complex and interesting rephrasings. Figure 7 : PINC scores of paraphrases collected. The higher the PINC, the more significant the rewording. Our proposed annotation strategy quadruples paraphrase yield, while not greatly reducing diversity as measured by PINC.", |
| "cite_spans": [ |
| { |
| "start": 206, |
| "end": 228, |
| "text": "(Chen and Dolan, 2011)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 164, |
| "end": 172, |
| "text": "Figure 5", |
| "ref_id": null |
| }, |
| { |
| "start": 177, |
| "end": 185, |
| "text": "Figure 6", |
| "ref_id": "FIGREF4" |
| }, |
| { |
| "start": 277, |
| "end": 287, |
| "text": "(Figure 7)", |
| "ref_id": null |
| }, |
| { |
| "start": 499, |
| "end": 507, |
| "text": "Figure 7", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Automatic Summarization Inspired Sentence Filtering", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Another approach to increasing paraphrase yield is to choose more appropriate topics. This is particularly important because the number of paraphrases varies greatly from topic to topic and thus the chance to encounter paraphrases during annotation (Figure 5) . We treat this topic selection problem as a variation of the Multi-Armed Bandit (MAB) problem (Robbins, 1985) and adapt a greedy algorithm, the bounded -first algorithm, of Tran-Thanh et al. (2012) to accelerate our corpus construction.", |
| "cite_spans": [ |
| { |
| "start": 355, |
| "end": 370, |
| "text": "(Robbins, 1985)", |
| "ref_id": "BIBREF42" |
| }, |
| { |
| "start": 434, |
| "end": 458, |
| "text": "Tran-Thanh et al. (2012)", |
| "ref_id": "BIBREF47" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 249, |
| "end": 259, |
| "text": "(Figure 5)", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Topic Selection using Multi-Armed Bandits (MAB) Algorithm", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "Our strategy consists of two phases. In the first exploration phase, we dedicate a fraction of the total budget, , to explore randomly chosen arms of each slot machine (trending topic on Twitter), each m times. In the second exploitation phase, we sort all topics according to their estimated proportion of paraphrases, and sequentially annotate (1\u2212 )B l\u2212m arms that have the highest estimated reward until reaching the maximum l = 10 annotations for any topic to insure data diversity.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Selection using Multi-Armed Bandits (MAB) Algorithm", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "We tune the parameters m to be 1 and to be between 0.35 \u223c 0.55 through simulation experiments, by artificially duplicating a small amount of real annotation data. We then apply this MAB algorithm in the real-world. We explore 500 random topics and then exploited 100 of them. The yield of paraphrases rises to 688 out of 2000 sentence pairs by using MAB and sentence filtering, a 4-fold increase compared to only using random selection ( Figure 6 ).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 438, |
| "end": 446, |
| "text": "Figure 6", |
| "ref_id": "FIGREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Topic Selection using Multi-Armed Bandits (MAB) Algorithm", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "Automatic Paraphrase Identification has been widely studied (Androutsopoulos and Malakasiotis, 2010; Madnani and Dorr, 2010) . The ACL Wiki gives an excellent summary of various techniques. 8 Many recent high-performance approaches use system combination (Das and Smith, 2009; Madnani et al., 2012; Ji and Eisenstein, 2013) . For example, Madnani et al. (2012) combines multiple sophisticated machine translation metrics using a metaclassifier. An earlier attempt on Twitter data is that of Xu et al. (2013b) . They limited the search space to only the tweets that explicitly mention a same date and a same named entity, however there remain a considerable amount of mislabels in their data. 9 Zanzotto et al. (2011) also experimented with SVM tree kernel methods on Twitter data.", |
| "cite_spans": [ |
| { |
| "start": 60, |
| "end": 100, |
| "text": "(Androutsopoulos and Malakasiotis, 2010;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 101, |
| "end": 124, |
| "text": "Madnani and Dorr, 2010)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 255, |
| "end": 276, |
| "text": "(Das and Smith, 2009;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 277, |
| "end": 298, |
| "text": "Madnani et al., 2012;", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 299, |
| "end": 323, |
| "text": "Ji and Eisenstein, 2013)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 339, |
| "end": 360, |
| "text": "Madnani et al. (2012)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 491, |
| "end": 508, |
| "text": "Xu et al. (2013b)", |
| "ref_id": "BIBREF53" |
| }, |
| { |
| "start": 692, |
| "end": 693, |
| "text": "9", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Departing from the previous work, we propose a latent variable model to jointly infer the correspondence between words and sentences. It is related to discriminative monolingual word alignment (MacCartney et al., 2008; Thadani and McKeown, 2011; Yao et al., 2013a,b) , but different in that the paraphrase task requires additional sentence alignment modeling with no word alignment data. Our approach is also inspired by Fung and Cheung's (2004a; 2004b) work on bootstrapping bilingual parallel sentence and word translations from comparable corpora.", |
| "cite_spans": [ |
| { |
| "start": 193, |
| "end": 218, |
| "text": "(MacCartney et al., 2008;", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 219, |
| "end": 245, |
| "text": "Thadani and McKeown, 2011;", |
| "ref_id": "BIBREF46" |
| }, |
| { |
| "start": 246, |
| "end": 266, |
| "text": "Yao et al., 2013a,b)", |
| "ref_id": null |
| }, |
| { |
| "start": 421, |
| "end": 446, |
| "text": "Fung and Cheung's (2004a;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 447, |
| "end": 453, |
| "text": "2004b)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Multiple Instance Learning (Dietterich et al., 1997) has been used by different research groups in the field of information extraction (Riedel et al., 2010; Hoffmann et al., 2011; Surdeanu et al., 2012; Ritter et al., 2013; Xu et al., 2013a) . The idea is to leverage structured data as weak supervision for tasks such as relation extraction. This is done, for example, by making the assumption that at least one sentence in the corpus which mentions a pair of entities (e 1 , e 2 ) participating in a relation (r) expresses the proposition: r(e 1 , e 2 ).", |
| "cite_spans": [ |
| { |
| "start": 27, |
| "end": 52, |
| "text": "(Dietterich et al., 1997)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 135, |
| "end": 156, |
| "text": "(Riedel et al., 2010;", |
| "ref_id": "BIBREF39" |
| }, |
| { |
| "start": 157, |
| "end": 179, |
| "text": "Hoffmann et al., 2011;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 180, |
| "end": 202, |
| "text": "Surdeanu et al., 2012;", |
| "ref_id": "BIBREF45" |
| }, |
| { |
| "start": 203, |
| "end": 223, |
| "text": "Ritter et al., 2013;", |
| "ref_id": "BIBREF41" |
| }, |
| { |
| "start": 224, |
| "end": 241, |
| "text": "Xu et al., 2013a)", |
| "ref_id": "BIBREF52" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Crowdsourcing Paraphrase Acquisition: Buzek et al. (2010) and Denkowski et al. (2010) focused specifically on collecting paraphrases of text to be translated to improve machine translation quality. Chen and Dolan (2011) gathered a large-scale paraphrase corpus by asking Mechanical Turk workers to caption the action in short video segments. Similarly, Burrows et al. (2012) asked crowdsourcing workers to rewrite selected excerpts from books. Ling et al. (2014) crowdsourced bilingual parallel text using Twitter as the source of data.", |
| "cite_spans": [ |
| { |
| "start": 38, |
| "end": 57, |
| "text": "Buzek et al. (2010)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 62, |
| "end": 85, |
| "text": "Denkowski et al. (2010)", |
| "ref_id": null |
| }, |
| { |
| "start": 198, |
| "end": 219, |
| "text": "Chen and Dolan (2011)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 353, |
| "end": 374, |
| "text": "Burrows et al. (2012)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 444, |
| "end": 462, |
| "text": "Ling et al. (2014)", |
| "ref_id": "BIBREF30" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "In contrast, we design a simple crowdsourcing task requiring only binary judgements on sentences collected from Twitter. There are several advantages as compared to existing work: a) the corpus also covers a very diverse range of topics and linguistic expressions, especially colloquial language, which is different from and thus complements previous paraphrase corpora; b) the paraphrase corpus collected contains a representative proportion of both negative and positive instances, while lack of good negative examples was an issue in the previous research (Das and Smith, 2009) ; c) this method is scalable and sustainable due to the simplicity of the task and real-time, virtually unlimited text supply from Twitter.", |
| "cite_spans": [ |
| { |
| "start": 559, |
| "end": 580, |
| "text": "(Das and Smith, 2009)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "This paper introduced MULTIP, a joint wordsentence model to learn paraphrases from temporally and topically grouped messages in Twitter. While simple and principled, our model achieves performance competitive with a state-of-the-art ensemble system combining latent semantic representations and surface similarity. By combining our method with previous work as a product-of-experts we outperform the state-of-the-art. Our latentvariable approach is capable of learning word-level paraphrase anchors given only sentence annotations. Because our graphical model is modular and extensible (for example it should be possible to replace the deterministic-or with other aggregators), we are optimistic this work might provide a path towards weakly supervised word alignment models using only sentence-level annotations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "6" |
| }, |
| { |
| "text": "In addition, we presented a novel and efficient annotation methodology which was used to crowdsource a unique corpus of paraphrases harvested from Twitter. We make this resource available to the research community.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Transactions of the Association for Computational Linguistics, 2 (2014) 435-448. Action Editor: Sharon Goldwater.Submitted 8/2014; Revised 10/2014; Published 10/2014. c 2014 Association for Computational Linguistics.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "The dataset and code are made available at: SemEval-2015 shared task http://alt.qcri.org/semeval2015/ task1/ and https://github.com/cocoxu/ twitterparaphrase/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We use the toolkit developed by O'Connor et al.(2010): https://github.com/brendano/tweetmotif", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "http://aclweb.org/aclwiki/index.php? title=Paraphrase_Identification_(State_of_ the_art)9 The data is released byXu et al. (2013b) at: https:// github.com/cocoxu/twitterparaphrase/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "The author would like to thank editor Sharon Goldwater and three anonymous reviewers for their thoughtful comments, which substantially improved this paper. We also thank Ralph Grishman, Sameer Singh, Yoav Artzi, Mark Yatskar, Chris Quirk, Ani Nenkova and Mitch Marcus for their feedback.This material is based in part on research sponsored by the NSF under grant IIS-1430651, DARPA under agreement number FA8750-13-2-0017 (the DEFT program) and through a Google Faculty Research Award to Chris Callison-Burch. The U.S. Government is authorized to reproduce and distribute reprints for governmental purposes. The views and conclusions contained in this publication are those of the authors and should not be interpreted as representing official policies or endorsements of DARPA or the U.S. Government. Yangfeng Ji is supported by a Google Faculty Research Award awarded to Jacob Eisenstein.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Semeval-2012 task 6: A pilot on semantic textual similarity", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Agirre", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Diab", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Cer", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Gonzalez-Agirre", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the First Joint Conference on Lexical and Computational Semantics (*SEM)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Agirre, E., Diab, M., Cer, D., and Gonzalez-Agirre, A. (2012). Semeval-2012 task 6: A pilot on se- mantic textual similarity. In Proceedings of the First Joint Conference on Lexical and Computa- tional Semantics (*SEM).", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "A survey of paraphrasing and textual entailment methods", |
| "authors": [ |
| { |
| "first": "I", |
| "middle": [], |
| "last": "Androutsopoulos", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Malakasiotis", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Journal of Artificial Intelligence Research", |
| "volume": "38", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Androutsopoulos, I. and Malakasiotis, P. (2010). A survey of paraphrasing and textual entailment methods. Journal of Artificial Intelligence Re- search, 38.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Inter-coder agreement for computational linguistics", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Artstein", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Poesio", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Computational Linguistics", |
| "volume": "34", |
| "issue": "4", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Artstein, R. and Poesio, M. (2008). Inter-coder agreement for computational linguistics. Compu- tational Linguistics, 34(4).", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "What is a paraphrase?", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Bhagat", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Computational Linguistics", |
| "volume": "", |
| "issue": "3", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bhagat, R. and Hovy, E. (2013). What is a para- phrase? Computational Linguistics, 39(3).", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Paraphrase acquisition via crowdsourcing and machine learning", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Burrows", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Potthast", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Stein", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Transactions on Intelligent Systems and Technology", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Burrows, S., Potthast, M., and Stein, B. (2012). Paraphrase acquisition via crowdsourcing and machine learning. Transactions on Intelligent Systems and Technology (ACM TIST).", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Error driven paraphrase annotation using Mechanical Turk", |
| "authors": [ |
| { |
| "first": "O", |
| "middle": [], |
| "last": "Buzek", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Resnik", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [ |
| "B" |
| ], |
| "last": "Bederson", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the Workshop on Creating Speech and Language Data with Amazon's Mechanical Turk", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Buzek, O., Resnik, P., and Bederson, B. B. (2010). Error driven paraphrase annotation using Me- chanical Turk. In Proceedings of the Workshop on Creating Speech and Language Data with Ama- zon's Mechanical Turk.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Collecting highly parallel data for paraphrase evaluation", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [ |
| "L" |
| ], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [ |
| "B" |
| ], |
| "last": "Dolan", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chen, D. L. and Dolan, W. B. (2011). Collecting highly parallel data for paraphrase evaluation. In Proceedings of the 49th Annual Meeting of the As- sociation for Computational Linguistics (ACL).", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Discriminative training methods for hidden Markov models: Theory and experiments with perceptron algorithms", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Collins", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the Conference on Empirical Methods on Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Collins, M. (2002). Discriminative training methods for hidden Markov models: Theory and experi- ments with perceptron algorithms. In Proceed- ings of the Conference on Empirical Methods on Natural Language Processing (EMNLP).", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Paraphrase identification as probabilistic quasi-synchronous recognition", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Das", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the Joint Conference of the 47th Annual Meeting of the Association for Computational Linguistics and the 4th International Joint Conference on Natural Language Processing of the Asian Federation of Natural Language Processing (ACL-IJCNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Das, D. and Smith, N. A. (2009). Paraphrase identi- fication as probabilistic quasi-synchronous recog- nition. In Proceedings of the Joint Conference of the 47th Annual Meeting of the Association for Computational Linguistics and the 4th Inter- national Joint Conference on Natural Language Processing of the Asian Federation of Natural Language Processing (ACL-IJCNLP).", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Turker-assisted paraphrasing for English-Arabic machine translation", |
| "authors": [], |
| "year": null, |
| "venue": "Proceedings of the Workshop on Creating Speech and Language Data with Amazon's Mechanical Turk", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Turker-assisted paraphrasing for English-Arabic machine translation. In Proceedings of the Work- shop on Creating Speech and Language Data with Amazon's Mechanical Turk.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Twitter part-of-speech tagging for all: Overcoming sparse and noisy data", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Derczynski", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Ritter", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Bontcheva", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the Recent Advances in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Derczynski, L., Ritter, A., Clark, S., and Bontcheva, K. (2013). Twitter part-of-speech tagging for all: Overcoming sparse and noisy data. In Proceed- ings of the Recent Advances in Natural Language Processing (RANLP).", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Finding bursty topics from microblogs", |
| "authors": [ |
| { |
| "first": "Q", |
| "middle": [], |
| "last": "Diao", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "E.-P", |
| "middle": [], |
| "last": "Lim", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diao, Q., Jiang, J., Zhu, F., and Lim, E.-P. (2012). Finding bursty topics from microblogs. In Pro- ceedings of the 50th Annual Meeting of the Asso- ciation for Computational Linguistics (ACL).", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Solving the multiple instance problem with axis-parallel rectangles", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [ |
| "G" |
| ], |
| "last": "Dietterich", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [ |
| "H" |
| ], |
| "last": "Lathrop", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Lozano-P\u00e9rez", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Artificial Intelligence", |
| "volume": "89", |
| "issue": "1", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dietterich, T. G., Lathrop, R. H., and Lozano-P\u00e9rez, T. (1997). Solving the multiple instance prob- lem with axis-parallel rectangles. Artificial Intel- ligence, 89(1).", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Unsupervised construction of large paraphrase corpora: Exploiting massively parallel news sources", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Dolan", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Quirk", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Brockett", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the 20th International Conference on Computational Linguistics (COLING)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dolan, B., Quirk, C., and Brockett, C. (2004). Un- supervised construction of large paraphrase cor- pora: Exploiting massively parallel news sources. In Proceedings of the 20th International Confer- ence on Computational Linguistics (COLING).", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Automatically constructing a corpus of sentential paraphrases", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Dolan", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Brockett", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the 3rd International Workshop on Paraphrasing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dolan, W. and Brockett, C. (2005). Automatically constructing a corpus of sentential paraphrases. In Proceedings of the 3rd International Workshop on Paraphrasing.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Accurate methods for the statistics of surprise and coincidence", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Dunning", |
| "suffix": "" |
| } |
| ], |
| "year": 1993, |
| "venue": "Computational Linguistics", |
| "volume": "", |
| "issue": "1", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dunning, T. (1993). Accurate methods for the statis- tics of surprise and coincidence. Computational Linguistics, 19(1).", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "WordNet. In Theory and Applications of Ontology: Computer Applications", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Fellbaum", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fellbaum, C. (2010). WordNet. In Theory and Ap- plications of Ontology: Computer Applications. Springer.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Brown corpus manual", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [ |
| "N" |
| ], |
| "last": "Francis", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Kucera", |
| "suffix": "" |
| } |
| ], |
| "year": 1979, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Francis, W. N. and Kucera, H. (1979). Brown corpus manual. Brown University.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Mining very-nonparallel corpora: Parallel sentence and lexicon extraction via bootstrapping and EM", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Fung", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Cheung", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fung, P. and Cheung, P. (2004a). Mining very-non- parallel corpora: Parallel sentence and lexicon ex- traction via bootstrapping and EM. In Proceed- ings of the Conference on Empirical Methods in Natural Language Processing (EMNLP).", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Multi-level bootstrapping for extracting parallel sentences from a quasi-comparable corpus", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Fung", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Cheung", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the International Conference on Computational Linguistics (COLING)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fung, P. and Cheung, P. (2004b). Multi-level boot- strapping for extracting parallel sentences from a quasi-comparable corpus. In Proceedings of the International Conference on Computational Lin- guistics (COLING).", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Modeling sentences in the latent space", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Diab", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 50th", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Guo, W. and Diab, M. (2012). Modeling sentences in the latent space. In Proceedings of the 50th", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Annual Meeting of the Association for Computational Linguistics (ACL)", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Annual Meeting of the Association for Computa- tional Linguistics (ACL).", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Linking tweets to news: A framework to enrich short text data in social media", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Ji", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Diab", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 51th Annual Meeting of the Association for Computational Linguistics (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Guo, W., Li, H., Ji, H., and Diab, M. (2013). Link- ing tweets to news: A framework to enrich short text data in social media. In Proceedings of the 51th Annual Meeting of the Association for Com- putational Linguistics (ACL).", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Automatically constructing a normalisation dictionary for microblogs", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Han", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Cook", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Baldwin", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the Conference on Empirical Methods on Natural Language Processing and Computational Natural Language Learning (EMNLP-CoNLL)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Han, B., Cook, P., and Baldwin, T. (2012). Auto- matically constructing a normalisation dictionary for microblogs. In Proceedings of the Confer- ence on Empirical Methods on Natural Language Processing and Computational Natural Language Learning (EMNLP-CoNLL).", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Training products of experts by minimizing contrastive divergence", |
| "authors": [ |
| { |
| "first": "G", |
| "middle": [ |
| "E" |
| ], |
| "last": "Hinton", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Neural Computation", |
| "volume": "", |
| "issue": "8", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hinton, G. E. (2002). Training products of experts by minimizing contrastive divergence. Neural Computation, 14(8).", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Knowledge-based weak supervision for information extraction of overlapping relations", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Hoffmann", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Ling", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [ |
| "S" |
| ], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [ |
| "S" |
| ], |
| "last": "Weld", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hoffmann, R., Zhang, C., Ling, X., Zettlemoyer, L. S., and Weld, D. S. (2011). Knowledge-based weak supervision for information extraction of overlapping relations. In Proceedings of the 49th Annual Meeting of the Association for Computa- tional Linguistics (ACL).", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "OntoNotes: the 90% solution", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Marcus", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Palmer", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Ramshaw", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Weischedel", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the Human Language Technology Conference -North American Chapter of the Association for Computational Linguistics Annual Meeting (HLT-NAACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hovy, E., Marcus, M., Palmer, M., Ramshaw, L., and Weischedel, R. (2006). OntoNotes: the 90% solution. In Proceedings of the Human Language Technology Conference -North American Chap- ter of the Association for Computational Linguis- tics Annual Meeting (HLT-NAACL).", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Discriminative improvements to distributional sentence similarity", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Ji", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Eisenstein", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ji, Y. and Eisenstein, J. (2013). Discriminative improvements to distributional sentence similar- ity. In Proceedings of the Conference on Em- pirical Methods in Natural Language Processing (EMNLP).", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "An end-to-end discriminative approach to machine translation", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Bouchard-C\u00f4t\u00e9", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Taskar", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the 21st International Conference on Computational Linguistics and the 44th annual meeting of the Association for Computational Linguistics (COLING-ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Liang, P., Bouchard-C\u00f4t\u00e9, A., Klein, D., and Taskar, B. (2006). An end-to-end discriminative approach to machine translation. In Proceedings of the 21st International Conference on Computational Lin- guistics and the 44th annual meeting of the Asso- ciation for Computational Linguistics (COLING- ACL).", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Crowdsourcing high-quality parallel data extraction from Twitter", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Ling", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Marujo", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Alan", |
| "suffix": "" |
| }, |
| { |
| "first": "Isabel", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the Ninth Workshop on Statistical Machine Translation (WMT)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ling, W., Marujo, L., Dyer, C., Alan, B., and Isabel, T. (2014). Crowdsourcing high-quality parallel data extraction from Twitter. In Proceedings of the Ninth Workshop on Statistical Machine Trans- lation (WMT).", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "A phrase-based alignment model for natural language inference", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Maccartney", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Galley", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "MacCartney, B., Galley, M., and Manning, C. (2008). A phrase-based alignment model for natural language inference. In Proceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP).", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Generating phrasal and sentential paraphrases: A survey of data-driven methods", |
| "authors": [ |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Madnani", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [ |
| "J" |
| ], |
| "last": "Dorr", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Computational Linguistics", |
| "volume": "", |
| "issue": "3", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Madnani, N. and Dorr, B. J. (2010). Generating phrasal and sentential paraphrases: A survey of data-driven methods. Computational Linguistics, 36(3).", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Re-examining machine translation metrics for paraphrase identification", |
| "authors": [ |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Madnani", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Tetreault", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Chodorow", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the Conference of the North American Chapter of the Association for Computational Linguistics -Human Language Technologies (NAACL-HLT)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Madnani, N., Tetreault, J., and Chodorow, M. (2012). Re-examining machine translation met- rics for paraphrase identification. In Proceedings of the Conference of the North American Chapter of the Association for Computational Linguistics -Human Language Technologies (NAACL-HLT).", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Applied morphological processing of english", |
| "authors": [ |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Minnen", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Carroll", |
| "suffix": "" |
| }, |
| { |
| "first": "Pearce", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Natural Language Engineering", |
| "volume": "7", |
| "issue": "03", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Minnen, G., Carroll, J., and Pearce, D. (2001). Ap- plied morphological processing of english. Natu- ral Language Engineering, 7(03).", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "On log-likelihood-ratios and the significance of rare events", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [ |
| "C" |
| ], |
| "last": "Moore", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Moore, R. C. (2004). On log-likelihood-ratios and the significance of rare events. In Proceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP).", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "The impact of frequency on summarization", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Nenkova", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Vanderwende", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nenkova, A. and Vanderwende, L. (2005). The im- pact of frequency on summarization. Technical report, Microsoft Research. MSR-TR-2005-101.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Tweetmotif: Exploratory search and topic summarization for Twitter", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "O'connor", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Krieger", |
| "suffix": "" |
| }, |
| { |
| "first": "Ahn", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 4th International AAAI Conference on Weblogs and Social Media (ICWSM)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "O'Connor, B., Krieger, M., and Ahn, D. (2010). Tweetmotif: Exploratory search and topic sum- marization for Twitter. In Proceedings of the 4th International AAAI Conference on Weblogs and Social Media (ICWSM).", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Using paraphrases for improving first story detection in news and Twitter", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Petrovi\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Osborne", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Lavrenko", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the Conference of the North American Chapter of the Association for Computational Linguistics -Human Language Technologies (NAACL-HLT)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Petrovi\u0107, S., Osborne, M., and Lavrenko, V. (2012). Using paraphrases for improving first story detec- tion in news and Twitter. In Proceedings of the Conference of the North American Chapter of the Association for Computational Linguistics -Hu- man Language Technologies (NAACL-HLT).", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Modeling relations and their mentions without labeled text", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Riedel", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Yao", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Mccallum", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedigns of the European Conference on Machine Learning and Principles and Practice of Knowledge Discovery in Databases (ECML-PKDD)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Riedel, S., Yao, L., and McCallum, A. (2010). Mod- eling relations and their mentions without labeled text. In Proceedigns of the European Conference on Machine Learning and Principles and Practice of Knowledge Discovery in Databases (ECML- PKDD).", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Open domain event extraction from Twitter", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Ritter", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mausam", |
| "suffix": "" |
| }, |
| { |
| "first": "O", |
| "middle": [], |
| "last": "Etzioni", |
| "suffix": "" |
| }, |
| { |
| "first": "Clark", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 18th International Conference on Knowledge Discovery and Data Mining (SIGKDD)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ritter, A., Mausam, Etzioni, O., and Clark, S. (2012). Open domain event extraction from Twit- ter. In Proceedings of the 18th International Con- ference on Knowledge Discovery and Data Min- ing (SIGKDD).", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "Modeling missing data in distant supervision for information extraction", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Ritter", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mausam", |
| "suffix": "" |
| }, |
| { |
| "first": "O", |
| "middle": [], |
| "last": "Etzioni", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ritter, A., Zettlemoyer, L., Mausam, and Etzioni, O. (2013). Modeling missing data in distant super- vision for information extraction. Transactions of the Association for Computational Linguistics (TACL).", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "Some aspects of the sequential design of experiments", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Robbins", |
| "suffix": "" |
| } |
| ], |
| "year": 1985, |
| "venue": "Herbert Robbins Selected Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
"raw_text": "Robbins, H. (1985). Some aspects of the sequential design of experiments. In Herbert Robbins Selected Papers. Springer.",
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "Automatic paraphrase discovery based on context and keywords between NE pairs", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Sekine", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the 3rd International Workshop on Paraphrasing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sekine, S. (2005). Automatic paraphrase discovery based on context and keywords between NE pairs. In Proceedings of the 3rd International Workshop on Paraphrasing.", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "Automatic paraphrase acquisition from news articles", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Shinyama", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Sekine", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Sudo", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the 2nd International Conference on Human Language Technology Research (HLT)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
"raw_text": "Shinyama, Y., Sekine, S., and Sudo, K. (2002). Automatic paraphrase acquisition from news articles. In Proceedings of the 2nd International Conference on Human Language Technology Research (HLT).",
| "links": null |
| }, |
| "BIBREF45": { |
| "ref_id": "b45", |
| "title": "Multi-instance multi-label learning for relation extraction", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Surdeanu", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Tibshirani", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Nallapati", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
"raw_text": "Surdeanu, M., Tibshirani, J., Nallapati, R., and Manning, C. D. (2012). Multi-instance multi-label learning for relation extraction. In Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics (ACL).",
| "links": null |
| }, |
| "BIBREF46": { |
| "ref_id": "b46", |
| "title": "Optimal and syntactically-informed decoding for monolingual phrase-based alignment", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Thadani", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Mckeown", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics -Human Language Technologies (ACL-HLT)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
"raw_text": "Thadani, K. and McKeown, K. (2011). Optimal and syntactically-informed decoding for monolingual phrase-based alignment. In Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics -Human Language Technologies (ACL-HLT).",
| "links": null |
| }, |
| "BIBREF47": { |
| "ref_id": "b47", |
| "title": "Efficient crowdsourcing of unknown experts using multi-armed bandits", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Tran-Thanh", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Stein", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Rogers", |
| "suffix": "" |
| }, |
| { |
| "first": "Jennings", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [ |
| "R" |
| ], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the European Conference on Artificial Intelligence (ECAI)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
"raw_text": "Tran-Thanh, L., Stein, S., Rogers, A., and Jennings, N. R. (2012). Efficient crowdsourcing of unknown experts using multi-armed bandits. In Proceedings of the European Conference on Artificial Intelligence (ECAI).",
| "links": null |
| }, |
| "BIBREF48": { |
| "ref_id": "b48", |
| "title": "Beyond SumBasic: Taskfocused summarization with sentence simplification and lexical expansion", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Vanderwende", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Suzuki", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Brockett", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Nenkova", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Information Processing & Management", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
"raw_text": "Vanderwende, L., Suzuki, H., Brockett, C., and Nenkova, A. (2007). Beyond SumBasic: Task-focused summarization with sentence simplification and lexical expansion. Information Processing & Management, 43.",
| "links": null |
| }, |
| "BIBREF49": { |
| "ref_id": "b49", |
| "title": "Using dependency-based features to take the \"para-farce\" out of paraphrase", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Wan", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Dras", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Dale", |
| "suffix": "" |
| }, |
| { |
| "first": "Paris", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the Australasian Language Technology Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
"raw_text": "Wan, S., Dras, M., Dale, R., and Paris, C. (2006). Using dependency-based features to take the \"para-farce\" out of paraphrase. In Proceedings of the Australasian Language Technology Workshop.",
| "links": null |
| }, |
| "BIBREF50": { |
| "ref_id": "b50", |
| "title": "Paraphrasing 4 microblog normalization", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [ |
| "W" |
| ], |
| "last": "Black", |
| "suffix": "" |
| }, |
| { |
| "first": "I", |
| "middle": [], |
| "last": "Trancoso", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the Conference on Empirical Methods on Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
"raw_text": "Wang, L., Dyer, C., Black, A. W., and Trancoso, I. (2013). Paraphrasing 4 microblog normalization. In Proceedings of the Conference on Empirical Methods on Natural Language Processing (EMNLP).",
| "links": null |
| }, |
| "BIBREF51": { |
| "ref_id": "b51", |
| "title": "The state of record linkage and current research problems", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [ |
| "E" |
| ], |
| "last": "Winkler", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
"raw_text": "Winkler, W. E. (1999). The state of record linkage and current research problems. Technical report, Statistical Research Division, U.S. Census Bureau.",
| "links": null |
| }, |
| "BIBREF52": { |
| "ref_id": "b52", |
| "title": "Filling knowledge base gaps for distant supervision of relation extraction", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Hoffmann", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Grishman", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xu, W., Hoffmann, R., Zhao, L., and Grishman, R. (2013a). Filling knowledge base gaps for distant supervision of relation extraction. In Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics (ACL).", |
| "links": null |
| }, |
| "BIBREF53": { |
| "ref_id": "b53", |
| "title": "Gathering and generating paraphrases from Twitter with application to normalization", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Ritter", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Grishman", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the Sixth Workshop on Building and Using Comparable Corpora (BUCC)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
"raw_text": "Xu, W., Ritter, A., and Grishman, R. (2013b). Gathering and generating paraphrases from Twitter with application to normalization. In Proceedings of the Sixth Workshop on Building and Using Comparable Corpora (BUCC).",
| "links": null |
| }, |
| "BIBREF54": { |
| "ref_id": "b54", |
| "title": "A lightweight and high performance monolingual word aligner", |
| "authors": [ |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Yao", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Van Durme", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Callison-Burch", |
| "suffix": "" |
| }, |
| { |
| "first": "Clark", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
"raw_text": "Yao, X., Van Durme, B., Callison-Burch, C., and Clark, P. (2013a). A lightweight and high performance monolingual word aligner. In Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics (ACL).",
| "links": null |
| }, |
| "BIBREF56": { |
| "ref_id": "b56", |
| "title": "Semi-markov phrase-based monolingual alignment", |
| "authors": [], |
| "year": null, |
| "venue": "Proceedings of the Conference on Empirical Methods on Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
"raw_text": "Semi-markov phrase-based monolingual alignment. In Proceedings of the Conference on Empirical Methods on Natural Language Processing (EMNLP).",
| "links": null |
| }, |
| "BIBREF57": { |
| "ref_id": "b57", |
| "title": "Linguistic redundancy in Twitter", |
| "authors": [ |
| { |
| "first": "F", |
| "middle": [ |
| "M" |
| ], |
| "last": "Zanzotto", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Pennacchiotti", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Tsioutsiouliklis", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
"raw_text": "Zanzotto, F. M., Pennacchiotti, M., and Tsioutsiouliklis, K. (2011). Linguistic redundancy in Twitter. In Proceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP).",
| "links": null |
| }, |
| "BIBREF58": { |
| "ref_id": "b58", |
| "title": "Online learning of relaxed CCG grammars for parsing to logical form", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [ |
| "S" |
| ], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Collins", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
"raw_text": "Zettlemoyer, L. S. and Collins, M. (2007). Online learning of relaxed CCG grammars for parsing to logical form. In Proceedings of the 2007",
| "links": null |
| }, |
| "BIBREF59": { |
| "ref_id": "b59", |
| "title": "Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning (EMNLP-CoNLL)", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
"raw_text": "Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning (EMNLP-CoNLL).",
| "links": null |
| }, |
| "BIBREF60": { |
| "ref_id": "b60", |
| "title": "Harvesting parallel news streams to generate paraphrases of event relations", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [ |
| "S" |
| ], |
| "last": "Weld", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
"raw_text": "Zhang, C. and Weld, D. S. (2013). Harvesting parallel news streams to generate paraphrases of event relations. In Proceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP).",
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF1": { |
| "num": null, |
| "text": "MULTIP Learning Algorithm", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF2": { |
| "num": null, |
"text": "Precision and recall curves. Our MULTIP model alone achieves competitive performance with the LEXLATENT system that combines latent space model and feature-based supervised classifier. The two approaches have complementary strengths, and achieve significant improvement when combined together (MULTIP-PE).",
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF4": { |
| "num": null, |
| "text": "Judging as Paraphrases (out of 5) Number of Sentence Pairs (out of 2000) Numbers of paraphrases collected by different methods. The annotation efficiency (3,4,5 are regarded as paraphrases) is significantly improved by the sentence filtering and Multi-Armed Bandits (MAB) based topic selection.", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "TABREF1": { |
| "html": null, |
| "type_str": "table", |
| "num": null, |
| "text": "Representative examples from paraphrase corpora. The average sentence length is 11.9 words in Twitter vs. 18.6 in the news corpus.", |
| "content": "<table/>" |
| }, |
| "TABREF3": { |
| "html": null, |
| "type_str": "table", |
| "num": null, |
| "text": "Performance of different paraphrase identification approaches on Twitter data. *An enhanced version that uses additional 1.6 million sentences from Twitter. ** Reimplementation of a strong baseline used by", |
| "content": "<table/>" |
| }, |
| "TABREF5": { |
| "html": null, |
| "type_str": "table", |
| "num": null, |
| "text": "Feature ablation by removing each individual feature group from the full set. The new Ciroc flavor has arrived rank=12 rank=266 \u2022 Ciroc got a new flavor comin out YES \u2022 Roberto Mancini gets the boot from Man City rank=64 rank=452 \u2022 Roberto Mancini has been sacked by Manchester City with the Blues saying YES \u2022 I want to watch the purge tonight rank=136 rank=11 \u2022 I want to go see The Purge who wants to come with NO", |
| "content": "<table><tr><td>To assess the impact of different features on the</td></tr><tr><td>model's performance, we conduct feature ablation</td></tr><tr><td>experiments, removing one group of features at a</td></tr><tr><td>time. The results are shown in Table 3. Both string</td></tr></table>" |
| }, |
| "TABREF6": { |
| "html": null, |
| "type_str": "table", |
| "num": null, |
| "text": "Example system outputs; rank is the position in the list of all candidate paraphrase pairs in the test set ordered by model score. MULTIP discovers lexically divergent paraphrases while LEXLATENT prefers more overall sentence similarity. Underline marks the word pair(s) with highest estimated probability as paraphrastic anchor(s) for each sentence pair.", |
| "content": "<table/>" |
| } |
| } |
| } |
| } |