| { |
| "paper_id": "S15-2012", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:38:00.262498Z" |
| }, |
| "title": "TKLBLIIR: Detecting Twitter Paraphrases with TweetingJay", |
| "authors": [ |
| { |
| "first": "Mladen", |
| "middle": [], |
| "last": "Karan", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Zagreb", |
| "location": { |
| "addrLine": "Unska 3", |
| "postCode": "10000", |
| "settlement": "Zagreb", |
| "country": "Croatia" |
| } |
| }, |
| "email": "mladen.karan@fer.hr" |
| }, |
| { |
| "first": "Goran", |
| "middle": [], |
| "last": "Glava\u0161", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Zagreb", |
| "location": { |
| "addrLine": "Unska 3", |
| "postCode": "10000", |
| "settlement": "Zagreb", |
| "country": "Croatia" |
| } |
| }, |
| "email": "goran.glavas@fer.hr" |
| }, |
| { |
| "first": "Jan", |
| "middle": [], |
| "last": "\u0160najder", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Zagreb", |
| "location": { |
| "addrLine": "Unska 3", |
| "postCode": "10000", |
| "settlement": "Zagreb", |
| "country": "Croatia" |
| } |
| }, |
| "email": "jan.snajder@fer.hr" |
| }, |
| { |
| "first": "Bojana", |
| "middle": [], |
| "last": "Dalbelo Ba\u0161i\u0107", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Zagreb", |
| "location": { |
| "addrLine": "Unska 3", |
| "postCode": "10000", |
| "settlement": "Zagreb", |
| "country": "Croatia" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "ivan.vulic@cs.kuleuven.be" |
| }, |
| { |
| "first": "Marie-Francine", |
| "middle": [], |
| "last": "Moens", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "When tweeting on a topic, Twitter users often post messages that convey the same or similar meaning. We describe TweetingJay, a system for detecting paraphrases and semantic similarity of tweets, with which we participated in Task 1 of SemEval 2015. TweetingJay uses a supervised model that combines semantic overlap and word alignment features, previously shown to be effective for detecting semantic textual similarity. TweetingJay reaches 65.9% F1-score and ranked fourth among the 18 participating systems. We additionally provide an analysis of the dataset and point to some peculiarities of the evaluation setup.", |
| "pdf_parse": { |
| "paper_id": "S15-2012", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "When tweeting on a topic, Twitter users often post messages that convey the same or similar meaning. We describe TweetingJay, a system for detecting paraphrases and semantic similarity of tweets, with which we participated in Task 1 of SemEval 2015. TweetingJay uses a supervised model that combines semantic overlap and word alignment features, previously shown to be effective for detecting semantic textual similarity. TweetingJay reaches 65.9% F1-score and ranked fourth among the 18 participating systems. We additionally provide an analysis of the dataset and point to some peculiarities of the evaluation setup.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Recognizing tweets that convey the same meaning (paraphrases) or similar meaning is useful in applications such as event detection (Petrovi\u0107 et al., 2012) , tweet summarization (Yang et al., 2011) , and tweet retrieval (Naveed et al., 2011) . Paraphrase detection in tweets is a more challenging task than paraphrase detection in other domains such as news (Xu et al., 2013) . Besides brevity (max. 140 characters), tweets exhibit all the irregularities typical of social media text (Baldwin et al., 2013) , such as informality, ungrammaticality, disfluency, and excessive use of jargon.", |
| "cite_spans": [ |
| { |
| "start": 131, |
| "end": 154, |
| "text": "(Petrovi\u0107 et al., 2012)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 177, |
| "end": 196, |
| "text": "(Yang et al., 2011)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 219, |
| "end": 240, |
| "text": "(Naveed et al., 2011)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 357, |
| "end": 374, |
| "text": "(Xu et al., 2013)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 483, |
| "end": 505, |
| "text": "(Baldwin et al., 2013)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper we present the TweetingJay system for detecting paraphrases in tweets, with which we participated in Task 1 of SemEval 2015 evaluation exercise (Xu et al., 2015) . Our system builds on findings from a large body of work on semantic textual similarity (STS) (\u0160ari\u0107 et al., 2012; Sultan et al., 2014) and recent breakthroughs in distributed word representations (Mikolov et al., 2013a) . We design a set of measures that capture the semantic similarity of tweets and train a support vector machine (SVM) using these measures as features. Positioning of our system at rank four among 18 teams, with only point and a half lower performance compared to the the best-performing system, suggests that STS measures are useful for detecting paraphrases in Twitter. We make our system freely available. 1 Besides providing the description of the Tweeting-Jay system, in this paper we analyze the evaluation setup, with special focus on the provided dataset and its subsets (train, validation, and test), and discuss the stability of the evaluation results.", |
| "cite_spans": [ |
| { |
| "start": 158, |
| "end": 175, |
| "text": "(Xu et al., 2015)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 271, |
| "end": 291, |
| "text": "(\u0160ari\u0107 et al., 2012;", |
| "ref_id": null |
| }, |
| { |
| "start": 292, |
| "end": 312, |
| "text": "Sultan et al., 2014)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 374, |
| "end": 397, |
| "text": "(Mikolov et al., 2013a)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 807, |
| "end": 808, |
| "text": "1", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "There is a large body of work on automated paraphrase detection; see (Madnani and Dorr, 2010) for a comprehensive overview. The majority of research efforts focus on detecting paraphrases in standard texts such as news (Das and Smith, 2009; Madnani et al., 2012) or artificially generated text (Madnani et al., 2012) . State-of-the-art approaches typically combine several measures of semantic similarity between text fragments. For instance, Madnani et al. (2012) achieve state-of-the-art performance by combining eight different machine translation metrics in a supervised fashion.", |
| "cite_spans": [ |
| { |
| "start": 69, |
| "end": 93, |
| "text": "(Madnani and Dorr, 2010)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 219, |
| "end": 240, |
| "text": "(Das and Smith, 2009;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 241, |
| "end": 262, |
| "text": "Madnani et al., 2012)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 294, |
| "end": 316, |
| "text": "(Madnani et al., 2012)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 443, |
| "end": 464, |
| "text": "Madnani et al. (2012)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "A task closely related to paraphrase detection is semantic textual similarity (STS), introduced at Se-mEval 2012 (Agirre et al., 2012) . There is now a significant amount of work on this task. The best performing STS systems employ various methods for aligning semantically corresponding words or otherwise quantifying the amount of semantically congruent content between two sentences (Sultan et al., 2014; \u0160ari\u0107 et al., 2012) .", |
| "cite_spans": [ |
| { |
| "start": 113, |
| "end": 134, |
| "text": "(Agirre et al., 2012)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 386, |
| "end": 407, |
| "text": "(Sultan et al., 2014;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 408, |
| "end": 427, |
| "text": "\u0160ari\u0107 et al., 2012)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In contrast, STS research on Twitter data has been scarce. Zanzotto et al. (2011) detect content redundancy between tweets, where redundant means paraphrased or entailed content. They achieve reasonable performance with SVM using vector-comparison and syntactic tree kernels. Xu et al. (2014) propose MUL-TIP, a latent variable model for joint inference of correspondence of words and sentences. An unsupervised model based on representing sentences in latent space is presented by Guo and Diab (2012) .", |
| "cite_spans": [ |
| { |
| "start": 59, |
| "end": 81, |
| "text": "Zanzotto et al. (2011)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 276, |
| "end": 292, |
| "text": "Xu et al. (2014)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 482, |
| "end": 501, |
| "text": "Guo and Diab (2012)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "TweetingJay is essentially a supervised machine learning model, which employs a number of semantic similarity features (18 features in total). Because the number of features is relatively small, we use SVM with a non-linear (RBF) kernel. Our features can be divided into 1 In the dataset provided by the organizers, each tweet is associated with a topic, with 10 to 100 tweet pairs per topic. An important preprocessing step is to remove tokens that can be found in the name of a topic. For example, for the topic \"Roberto Mancini\", we trim the tweets \"Roberto Mancini gets the boot from the Man City\" and \"City sacked Mancini\" to \"gets the boot from the Man City\" and \"City sacked\", respectively, and then compute the features on the trimmed tweets. The rationale is that, given a topic, there is an overlap in topic words between both paraphrase and non-paraphrase tweet pairs, which diminishes the discriminative power of the model's comparison features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "TweetingJay", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Semantic overlap features compare the content words (nouns, verbs, adjectives, adverbs, and numbers).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Semantic Overlap Features", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Ngram overlap. We compute the number of matching n-grams between two tweets. This number is normalized by the length of the first and the second tweet, respectively, and the harmonic mean of these two measures is taken as the similarity score. These features are computed separately for unigrams and bigrams. We also compute a weighted version by weighting the matched words w with their information content:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Semantic Overlap Features", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "ic(w) = -\\log \\frac{freq(w) + 1}{\\sum_{w' \\in C} freq(w') + 1}", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Semantic Overlap Features", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where C is the set of all words in the corpus and freq(w) is the word's frequency. We obtained the frequencies from the Google Books Ngrams (GBN) (Michel et al., 2011) . In the weighted version of the ngram overlap, the overlap is normalized by the sum of information contents of all words in the first and second tweet, respectively, and the resulting similarity score is the harmonic mean of these two scores.", |
| "cite_spans": [ |
| { |
| "start": 146, |
| "end": 167, |
| "text": "(Michel et al., 2011)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Semantic Overlap Features", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Greedy word alignment overlap (GWAO). To compute this feature, we iteratively pair the wordsone word from each tweet -according to their semantic similarity. In each iteration we greedily select the pair of words with the largest semantic similarity, and remove the words from their corresponding tweets, until no words are left in shorter of the two tweets. The similarity between words is computed as the cosine between their corresponding 300-dimension embedding vectors obtained using word2vec tool (Mikolov et al., 2013b ) on a 100 billion words portion of the Google News dataset. Let P (t 1 , t 2 ) be the set of word pairs obtained through the alignment on a pair of tweets (t 1 , t 2 ) and let vec(w) be the embedding vector of the word w. The GWAO score is computed as:", |
| "cite_spans": [ |
| { |
| "start": 503, |
| "end": 525, |
| "text": "(Mikolov et al., 2013b", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Semantic Overlap Features", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "gwao(t_1, t_2) = \\sum_{(w_1, w_2) \\in P(t_1, t_2)} \\alpha \\cdot \\cos(vec(w_1), vec(w_2))", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Semantic Overlap Features", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where \u03b1 is the larger of the information contents of the two words, \u03b1 = max (ic(w 1 ), ic(w 2 )). The gwao(t 1 , t 2 ) score is normalized with the sum of information contents of words from t 1 and t 2 , respectively, and the harmonic mean of the two normalized scores is taken as the feature value.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Semantic Overlap Features", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Tweet embedding similarity. Linear combinations of word embedding vectors have been shown to correspond well to the semantic composition of the individual words (Mikolov et al., 2013a; Mikolov et al., 2013b) . Building on this finding, we embed a tweet as a weighted sum of the embeddings of its content words, where we use information content of words as their weights:", |
| "cite_spans": [ |
| { |
| "start": 161, |
| "end": 184, |
| "text": "(Mikolov et al., 2013a;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 185, |
| "end": 207, |
| "text": "Mikolov et al., 2013b)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Semantic Overlap Features", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "vec(t) = \\sum_{w \\in t} ic(w) \\cdot vec(w).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Semantic Overlap Features", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "As the tweet embedding similarity, we simply compute the cosine between the corresponding tweet embeddings, i.e., cos (vec(t 1 ), vec(t 2 )).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Semantic Overlap Features", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Topic-specific information content. While information content computed on a general corpus such as GBN indicates how informative the word is in general, we also wanted to have a measure of how informative each word is within a tweet's topic. To this end we also compute topic-specific versions of all the above features using topic-specific instead of GBN information contents.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Semantic Overlap Features", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We adopt the word alignment features from two alignment-based systems: (1) the DLS@CU system of Sultan et al. (2014), which achieved the best performance on the STS task at SemEval 2014 (Agirre et al., 2014) , and (2) our implementation of the MULTIP latent variables model (Xu et al., 2014) , which utilizes the concept of an anchor: a pair of semantically aligned words from a paraphrased pair of tweets.", |
| "cite_spans": [ |
| { |
| "start": 186, |
| "end": 207, |
| "text": "(Agirre et al., 2014)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 274, |
| "end": 291, |
| "text": "(Xu et al., 2014)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Alignment Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Aligned word pairs (AWP). A state-of-the-art monolingual word alignment model by Sultan et al.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Alignment Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "outputs pairs of semantically aligned words between two given sentences. 2 We used the output of the DLS@CU model to generate two features: (1) the raw count of the aligned word pairs, and (2) the normalized count, which is the harmonic mean of the scores obtained by normalizing the raw count with the length of the first and second tweet, respectively. We computed two versions for both of these features, one considering all the tokens in tweets, and the other taking into account only content words.", |
| "cite_spans": [ |
| { |
| "start": 73, |
| "end": 74, |
| "text": "2", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Alignment Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Anchor count (ANC). We re-implemented the MULTIP model of Xu et al. (2014) . 3 As anchor candidates we consider all pairs of content words from the two tweets. We use a minimalistic set of features including (1) Levenshtein distance between candidate words, (2) several binary features indicating relatedness of words (e.g., lowercased tokens match, POStags match), and (3) semantic similarity obtained as the cosine of word embeddings, obtained with the GloVe model (Pennington et al., 2014) trained on Twitter data. 4 To account for feature interactions, following (Xu et al., 2014) , we also use conjunction features. We use the number of anchors identified by this method for a pair of tweets as a feature for our SVM model.", |
| "cite_spans": [ |
| { |
| "start": 58, |
| "end": 74, |
| "text": "Xu et al. (2014)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 77, |
| "end": 78, |
| "text": "3", |
| "ref_id": null |
| }, |
| { |
| "start": 467, |
| "end": 492, |
| "text": "(Pennington et al., 2014)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 518, |
| "end": 519, |
| "text": "4", |
| "ref_id": null |
| }, |
| { |
| "start": 567, |
| "end": 584, |
| "text": "(Xu et al., 2014)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Alignment Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Each team was allowed to submit two runs on the test set provided by the task organizers (Xu et al., 2015) . Participants were provided with a training set (13,063 pairs) and a development set (4,727 pairs). We used the train and development set to optimize the hyperparameters C and \u03b3 of our SVM model with the RBF kernel. For the final evaluation, the organizers used a test set of 972 tweet pairs.", |
| "cite_spans": [ |
| { |
| "start": 89, |
| "end": 106, |
| "text": "(Xu et al., 2015)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Feature sets. We divided the features in three groups: (1) semantic overlap features (SO) from Section 3.1, (2) aligned word pairs (AWP) features, and (3) the anchor count feature (ANC) from Section 3.2.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Model optimization. There are three ways how the optimization of the SVM model (hyperparameters C and \u03b3) could have been carried out: (1) training and optimization on the train set using 10-fold cross-validation, with no use of the development set (model M1); (2) training on the train set and optimization on the development set (model M2), and (3) training on the union of the train and development set using 10-fold cross-validation (model M3). Following the advice of the task organizers, we removed debatable cases from both the train and dev sets. We submitted models M1 and M2 for the official evaluation (our team name was TKLBLIIR). ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "4" |
| }, |
| { |
| "text": "A subset of the official ranking is shown in Table 4 .1. Our model M1 ranked fourth (sharing that place with FBK-HLT) in the official evaluation with a 1.5% lower F 1 score than the best-performing system. Our model M2 outperforms both baselines. The state-ofthe-art model MULTIP outperforms all participating systems. There is a notable performance gap between our two runs. We believe this comes from the high sensitivity of the performance on the test set to small changes in hyperparameter values. We elaborate more on this in the next section.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 45, |
| "end": 53, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Official Results", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "In Table 4 .2 we show the performance of the models M1, M2, and M3 on the development and test set. We observe an unusual behavior for all three models: a model that performs well on the development set typically performs badly on the test set, and vice versa. Furthermore, optimal cross-validated F 1 performance on the train set is 72%, which is 7 points above the best performance on the validation set. We believe this may be indicative of significant differences in the distributions underlying the datasets.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 3, |
| "end": 10, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Dataset Analysis", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "To investigate this further, we applied the Kolmogorov-Smirnov two-sample goodness-of-fit test (K-S test) (Daniel, 1990) for each of the used features to determine whether the train set is drawn from the same distribution as the development and test set. The K-S test is a nonparametric test that determines whether two independent samples differ in some respect, both in the measure of locations (means, median) and the shapes of the distributions (skewness, dispersion, kurtosis). The assumptions for the K-S test (independence of random samples and continuous variables) are met for all our features. We tested all features at the level of significance of 0.05 and rejected the null hypothesis for all features but one (bigram overlap). This confirms our initial assumption that the features in the train set are not identically distributed to those in the test set, bringing into question the representativeness of the test set. Reasons for this may include different annotation sources (crowdsourcing vs experts) and differences in time periods of tweets. Moreover, due to differences in the datasets, the performance is very much affected by the choice of the model optimization setup.", |
| "cite_spans": [ |
| { |
| "start": 106, |
| "end": 120, |
| "text": "(Daniel, 1990)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset Analysis", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Due to volatile performance, it is difficult to say much about which features are most useful. However, we have observed consistent performance boosts in all settings when introducing topic-specific versions of features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Analysis", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "We described TweetingJay, a supervised model for detecting Twitter paraphrases with which we participated in Task 1 of SemEval 2015. TweetingJay relies on features capturing semantic similarity and word alignments between tweets and achieves performance comparable to best-performing models on the task. On the methodological side, we investigated the cause for unusual behavior of our models on the different datasets. Our preliminary statistical analysis of the datasets seems to suggest that the underlying distributions of the datasets are significantly different. We believe this makes the performance estimates less reliable and suggest that the results should be taken with caution.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "http://takelab.fer.hr/tweetingjay", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://github.com/ma-sultan/ monolingual-word-aligner", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We obtain lower results on the test set (61.3% F1 vs. 69.6%). This is likely caused by the use of slightly different features and perhaps by differences in implementation.4 http://nlp.stanford.edu/projects/glove/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "SemEval-2012 task 6: A pilot on semantic textual similarity", |
| "authors": [ |
| { |
| "first": "Eneko", |
| "middle": [], |
| "last": "Agirre", |
| "suffix": "" |
| }, |
| { |
| "first": "Mona", |
| "middle": [], |
| "last": "Diab", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Cer", |
| "suffix": "" |
| }, |
| { |
| "first": "Aitor", |
| "middle": [], |
| "last": "Gonzalez-Agirre", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of SemEval 2012", |
| "volume": "", |
| "issue": "", |
| "pages": "385--393", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eneko Agirre, Mona Diab, Daniel Cer, and Aitor Gonzalez-Agirre. 2012. SemEval-2012 task 6: A pi- lot on semantic textual similarity. In Proceedings of SemEval 2012, pages 385-393.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "SemEval-2014 task 10: Multilingual semantic textual similarity", |
| "authors": [ |
| { |
| "first": "Eneko", |
| "middle": [], |
| "last": "Agirre", |
| "suffix": "" |
| }, |
| { |
| "first": "Carmen", |
| "middle": [], |
| "last": "Banea", |
| "suffix": "" |
| }, |
| { |
| "first": "Claire", |
| "middle": [], |
| "last": "Cardie", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Cer", |
| "suffix": "" |
| }, |
| { |
| "first": "Mona", |
| "middle": [], |
| "last": "Diab", |
| "suffix": "" |
| }, |
| { |
| "first": "Aitor", |
| "middle": [], |
| "last": "Gonzalez-Agirre", |
| "suffix": "" |
| }, |
| { |
| "first": "Weiwei", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| }, |
| { |
| "first": "Rada", |
| "middle": [], |
| "last": "Mihalcea", |
| "suffix": "" |
| }, |
| { |
| "first": "German", |
| "middle": [], |
| "last": "Rigau", |
| "suffix": "" |
| }, |
| { |
| "first": "Janyce", |
| "middle": [], |
| "last": "Wiebe", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "81--91", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eneko Agirre, Carmen Banea, Claire Cardie, Daniel Cer, Mona Diab, Aitor Gonzalez-Agirre, Weiwei Guo, Rada Mihalcea, German Rigau, and Janyce Wiebe. 2014. SemEval-2014 task 10: Multilingual semantic textual similarity. pages 81-91.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "How noisy social media text, how diffrnt social media sources", |
| "authors": [ |
| { |
| "first": "Timothy", |
| "middle": [], |
| "last": "Baldwin", |
| "suffix": "" |
| }, |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Cook", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Lui", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Mackinlay", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of IJCNLP 2013", |
| "volume": "", |
| "issue": "", |
| "pages": "356--364", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Timothy Baldwin, Paul Cook, Marco Lui, Andrew MacKinlay, and Li Wang. 2013. How noisy social media text, how diffrnt social media sources. In Pro- ceedings of IJCNLP 2013, pages 356-364.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Applied nonparametric statistics. The Duxbury advanced series in statistics and decision sciences", |
| "authors": [ |
| { |
| "first": "Wayne", |
| "middle": [ |
| "W" |
| ], |
| "last": "Daniel", |
| "suffix": "" |
| } |
| ], |
| "year": 1990, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wayne W. Daniel. 1990. Applied nonparametric statistics. The Duxbury advanced series in statistics and decision sciences.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Paraphrase identification as probabilistic quasi-synchronous recognition", |
| "authors": [ |
| { |
| "first": "Dipanjan", |
| "middle": [], |
| "last": "Das", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Noah", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of ACL 2009", |
| "volume": "", |
| "issue": "", |
| "pages": "468--476", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dipanjan Das and Noah A Smith. 2009. Paraphrase identi- fication as probabilistic quasi-synchronous recognition. In Proceedings of ACL 2009, pages 468-476.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Modeling sentences in the latent space", |
| "authors": [ |
| { |
| "first": "Weiwei", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| }, |
| { |
| "first": "Mona", |
| "middle": [], |
| "last": "Diab", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of ACL 2012", |
| "volume": "", |
| "issue": "", |
| "pages": "864--872", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Weiwei Guo and Mona Diab. 2012. Modeling sentences in the latent space. In Proceedings of ACL 2012, pages 864-872.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Generating phrasal and sentential paraphrases: A survey of datadriven methods", |
| "authors": [ |
| { |
| "first": "Nitin", |
| "middle": [], |
| "last": "Madnani", |
| "suffix": "" |
| }, |
| { |
| "first": "Bonnie", |
| "middle": [ |
| "J" |
| ], |
| "last": "Dorr", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Computational Linguistics", |
| "volume": "36", |
| "issue": "3", |
| "pages": "341--387", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nitin Madnani and Bonnie J Dorr. 2010. Generating phrasal and sentential paraphrases: A survey of data- driven methods. Computational Linguistics, 36(3):341- 387.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Re-examining machine translation metrics for paraphrase identification", |
| "authors": [ |
| { |
| "first": "Nitin", |
| "middle": [], |
| "last": "Madnani", |
| "suffix": "" |
| }, |
| { |
| "first": "Joel", |
| "middle": [], |
| "last": "Tetreault", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Chodorow", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of NAACL 2012", |
| "volume": "", |
| "issue": "", |
| "pages": "182--190", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nitin Madnani, Joel Tetreault, and Martin Chodorow. 2012. Re-examining machine translation metrics for paraphrase identification. In Proceedings of NAACL 2012, pages 182-190.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Quantitative analysis of culture using millions of digitized books", |
| "authors": [ |
| { |
| "first": "Jean-Baptiste", |
| "middle": [], |
| "last": "Michel", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuan Kui", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Aviva", |
| "middle": [], |
| "last": "Presser Aiden", |
| "suffix": "" |
| }, |
| { |
| "first": "Adrian", |
| "middle": [], |
| "last": "Veres", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Matthew", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Gray", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Joseph", |
| "suffix": "" |
| }, |
| { |
| "first": "Dale", |
| "middle": [], |
| "last": "Pickett", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Hoiberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Clancy", |
| "suffix": "" |
| }, |
| { |
| "first": "Jon", |
| "middle": [], |
| "last": "Norvig", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Orwant", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Science", |
| "volume": "331", |
| "issue": "6014", |
| "pages": "176--182", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jean-Baptiste Michel, Yuan Kui Shen, Aviva Presser Aiden, Adrian Veres, Matthew K Gray, Joseph P Pickett, Dale Hoiberg, Dan Clancy, Peter Norvig, Jon Orwant, et al. 2011. Quantitative analysis of culture using mil- lions of digitized books. Science, 331(6014):176-182.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Efficient estimation of word representations in vector space", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1301.3781" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. 2013a. Efficient estimation of word representa- tions in vector space. arXiv preprint arXiv:1301.3781.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Distributed representations of words and phrases and their compositionality", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [ |
| "S" |
| ], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeff", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of NIPS 2013", |
| "volume": "", |
| "issue": "", |
| "pages": "3111--3119", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg S Cor- rado, and Jeff Dean. 2013b. Distributed representa- tions of words and phrases and their compositionality. In Proceedings of NIPS 2013, pages 3111-3119.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Searching microblogs: coping with sparsity and document quality", |
| "authors": [ |
| { |
| "first": "Nasir", |
| "middle": [], |
| "last": "Naveed", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Gottron", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00e9r\u00f4me", |
| "middle": [], |
| "last": "Kunegis", |
| "suffix": "" |
| }, |
| { |
| "first": "Arifah Che", |
| "middle": [], |
| "last": "Alhadi", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of CIKM 2011", |
| "volume": "", |
| "issue": "", |
| "pages": "183--188", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nasir Naveed, Thomas Gottron, J\u00e9r\u00f4me Kunegis, and Ari- fah Che Alhadi. 2011. Searching microblogs: coping with sparsity and document quality. In Proceedings of CIKM 2011, pages 183-188.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Glove: Global vectors for word representation", |
| "authors": [ |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Pennington", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher D", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of EMNLP 2014", |
| "volume": "", |
| "issue": "", |
| "pages": "1532--1541", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeffrey Pennington, Richard Socher, and Christopher D Manning. 2014. Glove: Global vectors for word rep- resentation. In Proceedings of EMNLP 2014, pages 1532-1541.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Using paraphrases for improving first story detection in news and twitter", |
| "authors": [ |
| { |
| "first": "Sa\u0161a", |
| "middle": [], |
| "last": "Petrovi\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Miles", |
| "middle": [], |
| "last": "Osborne", |
| "suffix": "" |
| }, |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Lavrenko", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of NAACL 2012", |
| "volume": "", |
| "issue": "", |
| "pages": "338--346", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sa\u0161a Petrovi\u0107, Miles Osborne, and Victor Lavrenko. 2012. Using paraphrases for improving first story detection in news and twitter. In Proceedings of NAACL 2012, pages 338-346.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "TakeLab: systems for measuring semantic text similarity", |
| "authors": [ |
| { |
| "first": "Goran", |
| "middle": [], |
| "last": "Frane\u0161ari\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Mladen", |
| "middle": [], |
| "last": "Glava\u0161", |
| "suffix": "" |
| }, |
| { |
| "first": "Jan\u0161najder", |
| "middle": [], |
| "last": "Karan", |
| "suffix": "" |
| }, |
| { |
| "first": "Bojana Dalbelo", |
| "middle": [], |
| "last": "Ba\u0161i\u0107", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of SemEval 2012", |
| "volume": "", |
| "issue": "", |
| "pages": "441--448", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Frane\u0160ari\u0107, Goran Glava\u0161, Mladen Karan, Jan\u0160najder, and Bojana Dalbelo Ba\u0161i\u0107. 2012. TakeLab: systems for measuring semantic text similarity. In Proceedings of SemEval 2012, pages 441-448.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "DLS@CU: Sentence similarity from word alignment", |
| "authors": [ |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Md Arafat Sultan", |
| "suffix": "" |
| }, |
| { |
| "first": "Tamara", |
| "middle": [], |
| "last": "Bethard", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Sumner", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of SemEval", |
| "volume": "", |
| "issue": "", |
| "pages": "241--245", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Md Arafat Sultan, Steven Bethard, and Tamara Sumner. 2014. DLS@CU: Sentence similarity from word align- ment. In Proceedings of SemEval 2014, pages 241- 245.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Gathering and generating paraphrases from twitter with application to normalization", |
| "authors": [ |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [], |
| "last": "Ritter", |
| "suffix": "" |
| }, |
| { |
| "first": "Ralph", |
| "middle": [], |
| "last": "Grishman", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the Sixth Workshop on Building and Using Comparable Corpora", |
| "volume": "", |
| "issue": "", |
| "pages": "121--128", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wei Xu, Alan Ritter, and Ralph Grishman. 2013. Gath- ering and generating paraphrases from twitter with ap- plication to normalization. In Proceedings of the Sixth Workshop on Building and Using Comparable Corpora, pages 121-128.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Extracting lexically divergent paraphrases from Twitter", |
| "authors": [ |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [], |
| "last": "Ritter", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Callison-Burch", |
| "suffix": "" |
| }, |
| { |
| "first": "William", |
| "middle": [ |
| "B" |
| ], |
| "last": "Dolan", |
| "suffix": "" |
| }, |
| { |
| "first": "Yangfeng", |
| "middle": [], |
| "last": "Ji", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Transactions of the Association for Computational Linguistics (TACL)", |
| "volume": "2", |
| "issue": "1", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wei Xu, Alan Ritter, Chris Callison-Burch, William B. Dolan, and Yangfeng Ji. 2014. Extracting lexically divergent paraphrases from Twitter. Transactions of the Association for Computational Linguistics (TACL), 2(1).", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "SemEval-2015 Task 1: Paraphrase and semantic similarity in Twitter (PIT)", |
| "authors": [ |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Callison-Burch", |
| "suffix": "" |
| }, |
| { |
| "first": "William", |
| "middle": [ |
| "B" |
| ], |
| "last": "Dolan", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of SemEval", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wei Xu, Chris Callison-Burch, and William B. Dolan. 2015. SemEval-2015 Task 1: Paraphrase and semantic similarity in Twitter (PIT). In Proceedings of SemEval 2015.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Social context summarization", |
| "authors": [ |
| { |
| "first": "Zi", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Keke", |
| "middle": [], |
| "last": "Cai", |
| "suffix": "" |
| }, |
| { |
| "first": "Jie", |
| "middle": [], |
| "last": "Tang", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhong", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "Juanzi", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of ACM SIGIR 2011", |
| "volume": "", |
| "issue": "", |
| "pages": "255--264", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zi Yang, Keke Cai, Jie Tang, Li Zhang, Zhong Su, and Juanzi Li. 2011. Social context summarization. In Proceedings of ACM SIGIR 2011, pages 255-264.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Linguistic redundancy in twitter", |
| "authors": [ |
| { |
| "first": "Fabio", |
| "middle": [ |
| "Massimo" |
| ], |
| "last": "Zanzotto", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Pennacchiotti", |
| "suffix": "" |
| }, |
| { |
| "first": "Kostas", |
| "middle": [], |
| "last": "Tsioutsiouliklis", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of EMNLP 2011", |
| "volume": "", |
| "issue": "", |
| "pages": "659--669", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fabio Massimo Zanzotto, Marco Pennacchiotti, and Kostas Tsioutsiouliklis. 2011. Linguistic redundancy in twitter. In Proceedings of EMNLP 2011, pages 659- 669.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
"text": "semantic overlap features, most of which are adaptations of STS features proposed by \u0160ari\u0107 et al. (2012), and (2) word alignment features, based on (a) the output of the word alignment model by Sultan et al. (2014) and (b) a re-implementation of the MULTIP model by Xu et al. (2014).",
| "type_str": "figure", |
| "num": null, |
| "uris": null |
| }, |
| "TABREF1": { |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "text": "Model optimization using different datasets.", |
| "content": "<table/>" |
| } |
| } |
| } |
| } |