| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T14:59:33.291436Z" |
| }, |
| "title": "Learning Probabilistic Sentence Representations from Paraphrases", |
| "authors": [ |
| { |
| "first": "Mingda", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Toyota Technological Institute at Chicago", |
| "location": { |
| "postCode": "60637", |
| "settlement": "Chicago", |
| "region": "IL", |
| "country": "USA" |
| } |
| }, |
| "email": "mchen@ttic.edu" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Gimpel", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Toyota Technological Institute at Chicago", |
| "location": { |
| "postCode": "60637", |
| "settlement": "Chicago", |
| "region": "IL", |
| "country": "USA" |
| } |
| }, |
| "email": "kgimpel@ttic.edu" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Probabilistic word embeddings have shown effectiveness in capturing notions of generality and entailment, but there is very little work on doing the analogous type of investigation for sentences. In this paper we define probabilistic models that produce distributions for sentences. Our best-performing model treats each word as a linear transformation operator applied to a multivariate Gaussian distribution. We train our models on paraphrases and demonstrate that they naturally capture sentence specificity. While our proposed model achieves the best performance overall, we also show that specificity is represented by simpler architectures via the norm of the sentence vectors. Qualitative analysis shows that our probabilistic model captures sentential entailment and provides ways to analyze the specificity and preciseness of individual words.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Probabilistic word embeddings have shown effectiveness in capturing notions of generality and entailment, but there is very little work on doing the analogous type of investigation for sentences. In this paper we define probabilistic models that produce distributions for sentences. Our best-performing model treats each word as a linear transformation operator applied to a multivariate Gaussian distribution. We train our models on paraphrases and demonstrate that they naturally capture sentence specificity. While our proposed model achieves the best performance overall, we also show that specificity is represented by simpler architectures via the norm of the sentence vectors. Qualitative analysis shows that our probabilistic model captures sentential entailment and provides ways to analyze the specificity and preciseness of individual words.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Probabilistic word embeddings have been shown to be useful for capturing notions of generality and entailment (Vilnis and McCallum, 2014; Athiwaratkun and Wilson, 2017; Athiwaratkun et al., 2018) . In particular, researchers have found that the entropy of a word roughly encodes its generality, even though there is no training signal explicitly targeting this effect. For example, hypernyms tend to have larger variance than their corresponding hyponyms (Vilnis and McCallum, 2014) . However, there is very little work on doing the analogous type of investigation for sentences.", |
| "cite_spans": [ |
| { |
| "start": 110, |
| "end": 137, |
| "text": "(Vilnis and McCallum, 2014;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 138, |
| "end": 168, |
| "text": "Athiwaratkun and Wilson, 2017;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 169, |
| "end": 195, |
| "text": "Athiwaratkun et al., 2018)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 455, |
| "end": 482, |
| "text": "(Vilnis and McCallum, 2014)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we define probabilistic models that produce distributions for sentences. In particular, we choose a simple and interpretable probabilistic model that treats each word as an operator that translates and scales a Gaussian random variable representing the sentence. Our models are able to capture sentence specificity as measured by the annotated datasets of Li and Nenkova (2015) and Ko et al. (2019) by training solely on noisy paraphrase pairs. While our \"word-operator\" model yields the strongest performance, we also show that specificity is represented by simpler architectures via the norm of the sentence vectors. Qualitative analysis shows that our models represent sentences in ways that correspond to the entailment relationship and that individual word parameters can be analyzed to find words with varied and precise meanings.", |
| "cite_spans": [ |
| { |
| "start": 371, |
| "end": 392, |
| "text": "Li and Nenkova (2015)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 397, |
| "end": 413, |
| "text": "Ko et al. (2019)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We propose a model that uses ideas from flowbased variational autoencoders (VAEs) (Rezende and Mohamed, 2015; Kingma et al., 2016) by treating each word as an \"operator\". Intuitively, we assume there is a random variable z associated with each sentence", |
| "cite_spans": [ |
| { |
| "start": 82, |
| "end": 109, |
| "text": "(Rezende and Mohamed, 2015;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 110, |
| "end": 130, |
| "text": "Kingma et al., 2016)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Proposed Methods", |
| "sec_num": "2" |
| }, |
| { |
| "text": "s = {w 1 , w 2 , \u2022 \u2022 \u2022 , w n }.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Proposed Methods", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The random variable initially follows a standard multivariate Gaussian distribution. Then, each word in the sentence transforms the random variable sequentially, leading to a random variable that encodes its semantic information.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Proposed Methods", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Our word linear operator model (WLO) has two types of parameters for each word w i : a scaling factor A i \u2208 R k and a translation factor B i \u2208 R k . The word operators produce a sequence of random variables", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Proposed Methods", |
| "sec_num": "2" |
| }, |
| { |
| "text": "z 0 , z 1 , \u2022 \u2022 \u2022 , z n with z 0 \u223c N (0, I k ),", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Proposed Methods", |
| "sec_num": "2" |
| }, |
| { |
| "text": "where I k is a k \u00d7 k identity matrix, and the operations are defined as", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Proposed Methods", |
| "sec_num": "2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "z i = A i (z i\u22121 + B i )", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Proposed Methods", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The means and variances for each random variable are computed as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Proposed Methods", |
| "sec_num": "2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u00b5 i = A i (\u00b5 i\u22121 + B i )", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Proposed Methods", |
| "sec_num": "2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u03a3 i = A i \u03a3 i\u22121 A i", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Proposed Methods", |
| "sec_num": "2" |
| }, |
| { |
| "text": "For computational efficiency, we only consider diagonal covariance matrices, so the equations above can be further simplified.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Proposed Methods", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Following Wieting and Gimpel (2018) , all of our models are trained with a margin-based loss on paraphrase pairs (s 1 , s 2 ):", |
| "cite_spans": [ |
| { |
| "start": 10, |
| "end": 35, |
| "text": "Wieting and Gimpel (2018)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning", |
| "sec_num": "3" |
| }, |
| { |
| "text": "max(0, \u03b4 \u2212 d(s 1 , s 2 ) + d(s 1 , n 1 ))+ max(0, \u03b4 \u2212 d(s 1 , s 2 ) + d(s 2 , n 2 ))", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning", |
| "sec_num": "3" |
| }, |
| { |
| "text": "where \u03b4 is the margin and d is a similarity function that takes a pair of sentences and outputs a scalar denoting their similarity. The similarity function is maximized over a subset of examples (typically, the mini-batch) to choose negative examples n 1 and n 2 . When doing so, we use \"mega-batching\" (Wieting and Gimpel, 2018) and fix the mega-batch size at 20. For deterministic models, d is cosine similarity, while for probabilistic models, we use the expected inner product of Gaussians.", |
| "cite_spans": [ |
| { |
| "start": 303, |
| "end": 329, |
| "text": "(Wieting and Gimpel, 2018)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Let \u00b5 1 , \u00b5 2 be mean vectors and \u03a3 1 , \u03a3 2 be the variances predicted by models for a pair of input sentences. For the choice of d, following Vilnis and McCallum (2014) , we use the expected inner product of Gaussian distributions:", |
| "cite_spans": [ |
| { |
| "start": 143, |
| "end": 169, |
| "text": "Vilnis and McCallum (2014)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Expected Inner Product of Gaussians", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "x\u2208R k N (x; \u00b5 1 , \u03a3 1 )N (x; \u00b5 2 , \u03a3 2 )dx = log N (0; \u00b5 1 \u2212 \u00b5 2 , \u03a3 1 + \u03a3 2 ) = \u2212 1 2 log det (\u03a3 1 + \u03a3 2 ) \u2212 d 2 log(2\u03c0) \u2212 1 2 (\u00b5 1 \u2212 \u00b5 2 ) (\u03a3 1 + \u03a3 2 ) \u22121 (\u00b5 1 \u2212 \u00b5 2 )", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "Expected Inner Product of Gaussians", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "For diagonal matrices \u03a3 1 and \u03a3 2 , the equation above can be computed analytically.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Expected Inner Product of Gaussians", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "To avoid the mean or variance of the Gaussian distributions from becoming unbounded during training, resulting in degenerate solutions, we impose prior constraints on the operators introduced above. We force the transformed distribution after each operator to be relatively close to N (0, I k ), which can be thought of as our \"prior\" knowledge of the operator. Then our training additionally minimizes", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Regularization", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "\u03bb s\u2208{s 1 ,s 2 ,n 1 ,n 2 } w\u2208s KL(N (\u00b5(w), \u03a3(w)) N (0, I))", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Regularization", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "where \u03bb is a hyperparameter tuned based on the performance on the 2017 semantic textual similarity (STS; Cer et al., 2017) data. We found prior regularization very important, as will be shown in our results. For fair comparison, we also add L2 regularization to the baseline models.", |
| "cite_spans": [ |
| { |
| "start": 105, |
| "end": 122, |
| "text": "Cer et al., 2017)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Regularization", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We consider two baselines that have shown strong results on sentence similarity tasks (Wieting and Gimpel, 2018) . The first, word averaging (WORDAVG), simply averages the word embeddings in the sentence. The second, long shortterm memory (LSTM; Hochreiter and Schmidhuber, 1997) averaging (LSTMAVG), uses an LSTM to encode the sentence and averages the hidden vectors. Inspired by sentence VAEs (Bowman et al., 2016), we consider an LSTM based probabilistic baseline (LSTMGAUSSIAN) which builds upon LSTMAVG and uses separate linear transformations on the averaged hidden states to produce the mean and variance of a Gaussian distribution. We also benchmark several pretrained models, including GloVe (Pennington et al., 2014) , Skipthought (Kiros et al., 2015) , InferSent (Conneau et al., 2017) , BERT (Devlin et al., 2019) , and ELMo (Peters et al., 2018) . When using GloVe, we either sum embeddings (GloVe SUM) or average them (GloVe AVG) to produce a sentence vector. Similarly, for ELMo, we either sum the outputs from the last layer (ELMo SUM) or average them (ELMo AVG). For BERT, we take the representation for the \"[CLS]\" token.", |
| "cite_spans": [ |
| { |
| "start": 86, |
| "end": 112, |
| "text": "(Wieting and Gimpel, 2018)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 702, |
| "end": 727, |
| "text": "(Pennington et al., 2014)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 742, |
| "end": 762, |
| "text": "(Kiros et al., 2015)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 775, |
| "end": 797, |
| "text": "(Conneau et al., 2017)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 805, |
| "end": 826, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 838, |
| "end": 859, |
| "text": "(Peters et al., 2018)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baseline Methods", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We use the preprocessed version of ParaNMT-50M (Wieting and Gimpel, 2018) as our training set, which consists of 5 million paraphrase pairs.", |
| "cite_spans": [ |
| { |
| "start": 47, |
| "end": 73, |
| "text": "(Wieting and Gimpel, 2018)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "For evaluating sentence specificity, we use human-annotated test sets from four domains, including news, Twitter, Yelp reviews, and movie reviews, from Li and Nenkova (2015) and Ko et al. (2019) . For the news dataset, labels are either \"general\" or \"specific\" and there is additionally a training set. For the other datasets, labels are real values indicating specificity. Statistics for these datasets are shown in Table 1 .", |
| "cite_spans": [ |
| { |
| "start": 178, |
| "end": 194, |
| "text": "Ko et al. (2019)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 417, |
| "end": 424, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "For analysis we also use the semantic textual similarity (STS) benchmark test set (Cer et al., 2017) and the Stanford Natural Language Inference (SNLI) dataset (Bowman et al., 2015).", |
| "cite_spans": [ |
| { |
| "start": 82, |
| "end": 100, |
| "text": "(Cer et al., 2017)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "For predicting specificity in the news domain, we threshold the predictions either based on the entropy of Gaussian distributions produced from probabilistic models or based on the norm of vectors produced by deterministic models, which includes all of the pretrained models. The threshold is tuned based on the training set but no other training or tuning is done for this task with any of our models. For prediction in other domains, we simply compute the Spearman correlations between the entropy/norm and the labels. Intuitively, when sentences are longer, they tend to be more specific. So, we report baselines (\"Length\") that predict specificity solely based on length, by thresholding the sentence length for news (choosing the threshold using the training set) or simply returning the length for the others. The latter results are reported from Ko et al. (2019) . We also consider baselines that average or sum ranks of word frequencies within a sentence (\"Word Freq. AVG\" and \"Word Freq. SUM\"). Table 2 shows results on sentence specificity tasks. We compare to the best-performing models reported by Li and Nenkova (2015) and Ko et al. (2019) . Their models are specifically designed for predicting sentence specificity and they both use labeled training data from the news domain.", |
| "cite_spans": [ |
| { |
| "start": 853, |
| "end": 869, |
| "text": "Ko et al. (2019)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 1136, |
| "end": 1152, |
| "text": "Ko et al. (2019)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1004, |
| "end": 1011, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Specificity Prediction Setup", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Our averaging-based models (WORDAVG, LSTMAVG) failed on this task, either giving the majority class accuracy or negative correlations. So, we also evaluate WORDSUM, which sums word embeddings instead of averaging and shows strong performance compared to the other models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence Specificity", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "While the model from Li and Nenkova (2015) performs quite well in the news domain, its performance drops on other domains, indicating some amount of overfitting. On the other hand, WORD-SUM and WLO, which are trained on a large number of paraphrases, perform consistently across the four domains and both outperform the supervised models on Yelp. Additionally, our WLO model outperforms all our other models, achieving comparable performance to the supervised methods. Among pretrained models, BERT, Skipthought, ELMo SUM, and GloVe SUM show slight correlations with specificity, while InferSent performs strongly across domains. InferSent uses supervised training on a large manually-annotated dataset (SNLI) while WORDSUM and WLO are trained on automatically-generated paraphrases and still show results comparable to InferSent.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence Specificity", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "To control for effects due to sentence length, we design another experiment in which sentences from News training and test are grouped by length, and thresholds are tuned on the group of length k and tested on the group of length k \u2212 1, for all k, leading to a pool of 3582 test sentences. Table 3 shows the results. In this length-normalized experiment, the averaging models demonstrate much better performance and even outperform WORDSUM, but still WLO has the best performance. We test models on the SNLI test set, assuming that for a given premise p and hypothesis h, p is more specific than h for entailing sentence pairs. To avoid effects due to sentence length, we only consider p, h pairs with the same length. After this filtering, entailment/neutral/contradiction categories have 120/192/208 instances respectively. We encode each sentence and calculate the percentage of cases in which the hypothesis has larger entropy (or smaller norm for non-probabilistic models) than the premise. Under an ideal model, this would happen with 100% of entailing pairs while showing random results (50%) for the other two types of pairs.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 290, |
| "end": 297, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Sentence Specificity", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "As shown in Table 4 , our best paraphrasetrained models show similar trends to InferSent, achieving around 75% accuracy in the entailment category and around 50% accuracy in other categories. Although ELMo can also achieve similar accuracy in the entailment category, it seems to conflate entailment with contradiction, where it shows the highest percentage of all models. Other models, including BERT, GloVe, and Skipthought, are much closer to random (50%) for entailing pairs. impact of words on sentence representations. We ranked words under several criteria based on their translation parameter norms and single-word sentence entropies. Table 5 shows the top 20 words under each criterion. Words with small norm and small absolute entropy have little effect, both in terms of meaning and specificity; they are mostly function words. Words with large norm and small entropy have a large impact on the sentence while also making it more specific. They are organization names (cenelec) or technical terms found in medical or scientific literature. When they appear in a sentence, they are very likely to appear in its paraphrase.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 12, |
| "end": 19, |
| "text": "Table 4", |
| "ref_id": "TABREF6" |
| }, |
| { |
| "start": 643, |
| "end": 650, |
| "text": "Table 5", |
| "ref_id": "TABREF8" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Sentence Specificity", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Words with large norm and small absolute entropy contribute to the sentence semantics but do not make it more specific. Words like microwave and synthetic appear in many contexts and have multiple senses. Names (trent, alison) also appear in many contexts. Words like these often appear in a sentence's paraphrase, but can also appear in many other sentences in different contexts.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Lexical", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "Words with small norm/entropy make sentences more specific but do not lend themselves to a precise characterization. They affect sentence meaning, but can be expressed in many ways. For example, when beneficiaries appears in a sentence, its paraphrase often has a synonym like beneficiary, heirs, or grantees. These words may have multiple senses, but it appears more that they correspond to WORDSUM WLO largest norm (specific) smallest norm (general) smallest entropy (specific) largest entropy (general) this regulation shall not apply to wine grape products, with the exception of wine vinegar, spirit drinks or flavoured wines. oh, man, you're gonna... you're just gonna get it, vause * , aren't you ?", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Lexical", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "under a light coating of dew she was a velvet study in reflected mauve with rose overtones against the indigo nightward * sky. oh, man, you're gonna... you're just gonna get it, vause * , aren't you?", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Lexical", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "operating revenue community subsidies other subsidies/revenue * total (a) operating expenditure staff administration operating activities total (b) operating result (c=ab) okay, i know you don't get relationships, like, at all, but i don't need to screw anyone for an \"a.\" a similar influenza disease occurred in 47% of patients who received plegridy 125 micrograms every 2 weeks, and 13% of the patients were given placebo. 'authorisation' means an instrument issued in any form by the authorities by which the right to carry on the business of a credit institution is granted; concepts with many valid ways of expression.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Lexical", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "We subsample the ParaNMT training set and group sentences by length. For each model and length, we pick the sentence with either highest/lowest entropy or largest/smallest norm values. Table 6 shows some examples. WORDSUM tends to choose conversational sentences as general and those with many rare words as specific. WLO favors literary and technical/scientific sentences as most specific, and bureaucratic/official language as most general.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 185, |
| "end": 192, |
| "text": "Table 6", |
| "ref_id": "TABREF9" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Sentential Analysis", |
| "sec_num": "6.3" |
| }, |
| { |
| "text": "As shown in Table 7 , there is a large performance improvement after adding prior regularization for avoiding degenerate solutions.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 12, |
| "end": 19, |
| "text": "Table 7", |
| "ref_id": "TABREF10" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Effect of Prior Regularization", |
| "sec_num": "6.4" |
| }, |
| { |
| "text": "Although semantic textual similarity is not our target task, we still include the performance of our models on the STS benchmark test set in Table 8 to show that our models are competitive with standard strong baselines. When using probabilistic models to predict sentence similarity during test time, we let v 1 = concat(\u00b5 1 , \u03a3 1 ), v 2 = concat(\u00b5 2 , \u03a3 2 ), where concat is a concatenation operation, and predict sentence similarity via cosine(v 1 , v 2 ), since we find it performs better than solely using the mean vectors. The two probabilistic models, LSTMGAUSSIAN and WLO, are able to outperform the baselines slightly.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 141, |
| "end": 148, |
| "text": "Table 8", |
| "ref_id": "TABREF11" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Semantic Textual Similarity", |
| "sec_num": "6.5" |
| }, |
| { |
| "text": "Our models are related to work in learning probabilistic word embeddings (Vilnis and McCallum, 2014; Athiwaratkun and Wilson, 2017; Athiwaratkun et al., 2018 ) and text-based VAEs (Miao et al., 2016; Bowman et al., 2016; Yang et al., 2017; Kim et al., 2018; Xu and Durrett, 2018, inter alia) . The WLO is also related to flow-based VAEs (Rezende and Mohamed, 2015; Kingma et al., 2016) , where hidden layers are viewed as operators over the density function of latent variables. Previous work on sentence specificity relies on hand-crafted features or direct training on annotated data (Louis and Nenkova, 2011; Li and Nenkova, 2015) . Recently, Ko et al. (2019) used domain adaptation for this problem when only the source domain has annotations. Our work also relates to learning sentence embeddings from paraphrase pairs (Wieting et al., 2016; Wieting and Gimpel, 2018) .", |
| "cite_spans": [ |
| { |
| "start": 73, |
| "end": 100, |
| "text": "(Vilnis and McCallum, 2014;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 101, |
| "end": 131, |
| "text": "Athiwaratkun and Wilson, 2017;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 132, |
| "end": 157, |
| "text": "Athiwaratkun et al., 2018", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 180, |
| "end": 199, |
| "text": "(Miao et al., 2016;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 200, |
| "end": 220, |
| "text": "Bowman et al., 2016;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 221, |
| "end": 239, |
| "text": "Yang et al., 2017;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 240, |
| "end": 257, |
| "text": "Kim et al., 2018;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 258, |
| "end": 291, |
| "text": "Xu and Durrett, 2018, inter alia)", |
| "ref_id": null |
| }, |
| { |
| "start": 337, |
| "end": 364, |
| "text": "(Rezende and Mohamed, 2015;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 365, |
| "end": 385, |
| "text": "Kingma et al., 2016)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 586, |
| "end": 611, |
| "text": "(Louis and Nenkova, 2011;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 612, |
| "end": 633, |
| "text": "Li and Nenkova, 2015)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 646, |
| "end": 662, |
| "text": "Ko et al. (2019)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 824, |
| "end": 846, |
| "text": "(Wieting et al., 2016;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 847, |
| "end": 872, |
| "text": "Wieting and Gimpel, 2018)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "7" |
| }, |
| { |
| "text": "We trained sentence models on paraphrase pairs and showed that they naturally capture specificity and entailment. Our proposed WLO model, which treats each word as a linear transformation operator, achieves the best performance and lends itself to analysis.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "8" |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We would like to thank the anonymous reviewers, NVIDIA for donating GPUs used in this research, Jessy Li for clarifying the experimental setup used in Li and Nenkova (2015) , and Google for a faculty research award to K. Gimpel that partially supported this research.", |
| "cite_spans": [ |
| { |
| "start": 151, |
| "end": 172, |
| "text": "Li and Nenkova (2015)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| }, |
| { |
| "text": "For all experiments, the dimension of word embeddings and word operator is 50. The dimension of LSTM is 100. The dimension of Gaussian distribution for LSTMGAUSSIAN is 100. Mini-batch size is 100. For LSTM, LSTMGAUSSIAN, and WLO, we scramble training sentences with a probability of 0.4. For baseline models, the margin \u03b4 is 0.4. For other models, \u03b4 is 1. All models are randomly initialized and trained with Adam (Kingma and Ba, 2014) using learning rate of 0.001.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.1 Hyperparameters", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Multimodal word distributions", |
| "authors": [ |
| { |
| "first": "Ben", |
| "middle": [], |
| "last": "Athiwaratkun", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Wilson", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1645--1656", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P17-1151" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ben Athiwaratkun and Andrew Wilson. 2017. Mul- timodal word distributions. In Proceedings of the 55th Annual Meeting of the Association for Compu- tational Linguistics (Volume 1: Long Papers), pages 1645-1656. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Probabilistic FastText for multi-sense word embeddings", |
| "authors": [ |
| { |
| "first": "Ben", |
| "middle": [], |
| "last": "Athiwaratkun", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Wilson", |
| "suffix": "" |
| }, |
| { |
| "first": "Anima", |
| "middle": [], |
| "last": "Anandkumar", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1--11", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P18-1001" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ben Athiwaratkun, Andrew Wilson, and Anima Anandkumar. 2018. Probabilistic FastText for multi-sense word embeddings. In Proceedings of the 56th Annual Meeting of the Association for Com- putational Linguistics (Volume 1: Long Papers), pages 1-11, Melbourne, Australia. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "A large annotated corpus for learning natural language inference", |
| "authors": [ |
| { |
| "first": "Samuel", |
| "middle": [ |
| "R" |
| ], |
| "last": "Bowman", |
| "suffix": "" |
| }, |
| { |
| "first": "Gabor", |
| "middle": [], |
| "last": "Angeli", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Potts", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "632--642", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D15-1075" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Samuel R. Bowman, Gabor Angeli, Christopher Potts, and Christopher D. Manning. 2015. A large anno- tated corpus for learning natural language inference. In Proceedings of the 2015 Conference on Empiri- cal Methods in Natural Language Processing, pages 632-642. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Generating sentences from a continuous space", |
| "authors": [ |
| { |
| "first": "Samuel", |
| "middle": [ |
| "R" |
| ], |
| "last": "Bowman", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Vilnis", |
| "suffix": "" |
| }, |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Dai", |
| "suffix": "" |
| }, |
| { |
| "first": "Rafal", |
| "middle": [], |
| "last": "Jozefowicz", |
| "suffix": "" |
| }, |
| { |
| "first": "Samy", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of The 20th SIGNLL Conference on Computational Natural Language Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "10--21", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/K16-1002" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Samuel R. Bowman, Luke Vilnis, Oriol Vinyals, An- drew Dai, Rafal Jozefowicz, and Samy Bengio. 2016. Generating sentences from a continuous space. In Proceedings of The 20th SIGNLL Confer- ence on Computational Natural Language Learning, pages 10-21. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Semeval-2017 task 1: Semantic textual similarity multilingual and crosslingual focused evaluation", |
| "authors": [ |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Cer", |
| "suffix": "" |
| }, |
| { |
| "first": "Mona", |
| "middle": [], |
| "last": "Diab", |
| "suffix": "" |
| }, |
| { |
| "first": "Eneko", |
| "middle": [], |
| "last": "Agirre", |
| "suffix": "" |
| }, |
| { |
| "first": "Inigo", |
| "middle": [], |
| "last": "Lopez-Gazpio", |
| "suffix": "" |
| }, |
| { |
| "first": "Lucia", |
| "middle": [], |
| "last": "Specia", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 11th International Workshop on Semantic Evaluation (SemEval-2017)", |
| "volume": "", |
| "issue": "", |
| "pages": "1--14", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/S17-2001" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Daniel Cer, Mona Diab, Eneko Agirre, Inigo Lopez- Gazpio, and Lucia Specia. 2017. Semeval-2017 task 1: Semantic textual similarity multilingual and crosslingual focused evaluation. In Proceedings of the 11th International Workshop on Semantic Eval- uation (SemEval-2017), pages 1-14. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Supervised learning of universal sentence representations from natural language inference data", |
| "authors": [ |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Douwe", |
| "middle": [], |
| "last": "Kiela", |
| "suffix": "" |
| }, |
| { |
| "first": "Holger", |
| "middle": [], |
| "last": "Schwenk", |
| "suffix": "" |
| }, |
| { |
| "first": "Lo\u00efc", |
| "middle": [], |
| "last": "Barrault", |
| "suffix": "" |
| }, |
| { |
| "first": "Antoine", |
| "middle": [], |
| "last": "Bordes", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "670--680", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D17-1070" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexis Conneau, Douwe Kiela, Holger Schwenk, Lo\u00efc Barrault, and Antoine Bordes. 2017. Supervised learning of universal sentence representations from natural language inference data. In Proceedings of the 2017 Conference on Empirical Methods in Nat- ural Language Processing, pages 670-680. Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1423" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Long short-term memory", |
| "authors": [ |
| { |
| "first": "Sepp", |
| "middle": [], |
| "last": "Hochreiter", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00fcrgen", |
| "middle": [], |
| "last": "Schmidhuber", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Neural computation", |
| "volume": "9", |
| "issue": "8", |
| "pages": "1735--1780", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural computation, 9(8):1735-1780.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Semi-amortized variational autoencoders", |
| "authors": [ |
| { |
| "first": "Yoon", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Sam", |
| "middle": [], |
| "last": "Wiseman", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Miller", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Sontag", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Rush", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 35th International Conference on Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "2678--2687", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yoon Kim, Sam Wiseman, Andrew Miller, David Son- tag, and Alexander Rush. 2018. Semi-amortized variational autoencoders. In Proceedings of the 35th International Conference on Machine Learning, vol- ume 80 of Proceedings of Machine Learning Re- search, pages 2678-2687, Stockholmsmssan, Stock- holm Sweden. PMLR.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Adam: A method for stochastic optimization", |
| "authors": [ |
| { |
| "first": "Diederik", |
| "middle": [ |
| "P" |
| ], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1412.6980" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik P Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Improved variational inference with inverse autoregressive flow", |
| "authors": [ |
| { |
| "first": "Diederik", |
| "middle": [ |
| "P" |
| ], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Salimans", |
| "suffix": "" |
| }, |
| { |
| "first": "Rafal", |
| "middle": [], |
| "last": "Jozefowicz", |
| "suffix": "" |
| }, |
| { |
| "first": "Xi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Max", |
| "middle": [], |
| "last": "Welling", |
| "suffix": "" |
| } |
| ], |
| ], |
| "year": 2016, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "29", |
| "issue": "", |
| "pages": "4743--4751", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik P Kingma, Tim Salimans, Rafal Jozefowicz, Xi Chen, Ilya Sutskever, and Max Welling. 2016. Improved variational inference with inverse autore- gressive flow. In D. D. Lee, M. Sugiyama, U. V. Luxburg, I. Guyon, and R. Garnett, editors, Ad- vances in Neural Information Processing Systems 29, pages 4743-4751. Curran Associates, Inc.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Skip-thought vectors", |
| "authors": [ |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Kiros", |
| "suffix": "" |
| }, |
| { |
| "first": "Yukun", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruslan", |
| "middle": [], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [ |
| "S" |
| ], |
| "last": "Zemel", |
| "suffix": "" |
| }, |
| { |
| "first": "Antonio", |
| "middle": [], |
| "last": "Torralba", |
| "suffix": "" |
| }, |
| { |
| "first": "Raquel", |
| "middle": [], |
| "last": "Urtasun", |
| "suffix": "" |
| }, |
| { |
| "first": "Sanja", |
| "middle": [], |
| "last": "Fidler", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 28th International Conference on Neural Information Processing Systems", |
| "volume": "2", |
| "issue": "", |
| "pages": "3294--3302", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ryan Kiros, Yukun Zhu, Ruslan Salakhutdinov, Richard S. Zemel, Antonio Torralba, Raquel Ur- tasun, and Sanja Fidler. 2015. Skip-thought vec- tors. In Proceedings of the 28th International Con- ference on Neural Information Processing Systems - Volume 2, NIPS'15, pages 3294-3302, Cambridge, MA, USA. MIT Press.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Domain agnostic real-valued specificity prediction", |
| "authors": [ |
| { |
| "first": "Wei-Jen", |
| "middle": [], |
| "last": "Ko", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Durrett", |
| "suffix": "" |
| }, |
| { |
| "first": "Junyi Jessy", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Thirty-Third AAAI Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "6610--6617", |
| "other_ids": { |
| "DOI": [ |
| "10.1609/aaai.v33i01.33016610" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wei-Jen Ko, Greg Durrett, and Junyi Jessy Li. 2019. Domain agnostic real-valued specificity prediction. In Proceedings of the Thirty-Third AAAI Conference on Artificial Intelligence, pages 6610-6617. AAAI Press.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Fast and accurate prediction of sentence specificity", |
| "authors": [ |
| { |
| "first": "Junyi", |
| "middle": [ |
| "Jessy" |
| ], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Ani", |
| "middle": [], |
| "last": "Nenkova", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the Twenty-Ninth AAAI Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "2281--2287", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Junyi Jessy Li and Ani Nenkova. 2015. Fast and accu- rate prediction of sentence specificity. In Proceed- ings of the Twenty-Ninth AAAI Conference on Artifi- cial Intelligence, pages 2281-2287. AAAI Press.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Automatic identification of general and specific sentences by leveraging discourse annotations", |
| "authors": [ |
| { |
| "first": "Annie", |
| "middle": [], |
| "last": "Louis", |
| "suffix": "" |
| }, |
| { |
| "first": "Ani", |
| "middle": [], |
| "last": "Nenkova", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of 5th International Joint Conference on Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "605--613", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Annie Louis and Ani Nenkova. 2011. Automatic iden- tification of general and specific sentences by lever- aging discourse annotations. In Proceedings of 5th International Joint Conference on Natural Lan- guage Processing, pages 605-613. Asian Federation of Natural Language Processing.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Neural variational inference for text processing", |
| "authors": [ |
| { |
| "first": "Yishu", |
| "middle": [], |
| "last": "Miao", |
| "suffix": "" |
| }, |
| { |
| "first": "Lei", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Phil", |
| "middle": [], |
| "last": "Blunsom", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 33rd International Conference on International Conference on Machine Learning", |
| "volume": "48", |
| "issue": "", |
| "pages": "1727--1736", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yishu Miao, Lei Yu, and Phil Blunsom. 2016. Neu- ral variational inference for text processing. In Pro- ceedings of the 33rd International Conference on In- ternational Conference on Machine Learning -Vol- ume 48, ICML'16, pages 1727-1736. JMLR.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Glove: Global vectors for word representation", |
| "authors": [ |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Pennington", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1532--1543", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/v1/D14-1162" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeffrey Pennington, Richard Socher, and Christopher Manning. 2014. Glove: Global vectors for word representation. In Proceedings of the 2014 Con- ference on Empirical Methods in Natural Language Processing (EMNLP), pages 1532-1543. Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Deep contextualized word representations", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Peters", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Neumann", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Iyyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Gardner", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "2227--2237", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N18-1202" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word rep- resentations. In Proceedings of the 2018 Confer- ence of the North American Chapter of the Associ- ation for Computational Linguistics: Human Lan- guage Technologies, Volume 1 (Long Papers), pages 2227-2237, New Orleans, Louisiana. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Variational inference with normalizing flows", |
| "authors": [ |
| { |
| "first": "Danilo", |
| "middle": [], |
| "last": "Rezende", |
| "suffix": "" |
| }, |
| { |
| "first": "Shakir", |
| "middle": [], |
| "last": "Mohamed", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 32nd International Conference on Machine Learning", |
| "volume": "37", |
| "issue": "", |
| "pages": "1530--1538", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Danilo Rezende and Shakir Mohamed. 2015. Varia- tional inference with normalizing flows. In Proceed- ings of the 32nd International Conference on Ma- chine Learning, volume 37 of Proceedings of Ma- chine Learning Research, pages 1530-1538, Lille, France. PMLR.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Word representations via Gaussian embedding", |
| "authors": [ |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Vilnis", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Mccallum", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1412.6623" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Luke Vilnis and Andrew McCallum. 2014. Word representations via Gaussian embedding. arXiv preprint arXiv:1412.6623.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Towards universal paraphrastic sentence embeddings", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Wieting", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Bansal", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Gimpel", |
| "suffix": "" |
| }, |
| { |
| "first": "Karen", |
| "middle": [], |
| "last": "Livescu", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John Wieting, Mohit Bansal, Kevin Gimpel, and Karen Livescu. 2016. Towards universal paraphrastic sen- tence embeddings. In Proceedings of International Conference on Learning Representations.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "ParaNMT-50M: Pushing the limits of paraphrastic sentence embeddings with millions of machine translations", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Wieting", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Gimpel", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "451--462", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John Wieting and Kevin Gimpel. 2018. ParaNMT- 50M: Pushing the limits of paraphrastic sentence embeddings with millions of machine translations. In Proceedings of the 56th Annual Meeting of the As- sociation for Computational Linguistics (Volume 1: Long Papers), pages 451-462. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Spherical latent spaces for stable variational autoencoders", |
| "authors": [ |
| { |
| "first": "Jiacheng", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Durrett", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "4503--4513", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiacheng Xu and Greg Durrett. 2018. Spherical latent spaces for stable variational autoencoders. In Pro- ceedings of the 2018 Conference on Empirical Meth- ods in Natural Language Processing, pages 4503- 4513. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Improved variational autoencoders for text modeling using dilated convolutions", |
| "authors": [ |
| { |
| "first": "Zichao", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhiting", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruslan", |
| "middle": [], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| }, |
| { |
| "first": "Taylor", |
| "middle": [], |
| "last": "Berg-Kirkpatrick", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 34th International Conference on Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "3881--3890", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zichao Yang, Zhiting Hu, Ruslan Salakhutdinov, and Taylor Berg-Kirkpatrick. 2017. Improved varia- tional autoencoders for text modeling using dilated convolutions. In Proceedings of the 34th Inter- national Conference on Machine Learning, vol- ume 70 of Proceedings of Machine Learning Re- search, pages 3881-3890, International Convention Centre, Sydney, Australia. PMLR.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF1": { |
| "num": null, |
| "content": "<table/>", |
| "type_str": "table", |
| "html": null, |
| "text": "Sizes of test sets for sentence specificity." |
| }, |
| "TABREF3": { |
| "num": null, |
| "content": "<table><tr><td/><td>Full</td><td>Length norm.</td></tr><tr><td>Majority baseline</td><td>54.6</td><td>50.1</td></tr><tr><td>WORDAVG</td><td>54.6</td><td>69.0</td></tr><tr><td>WORDSUM</td><td>75.8</td><td>68.6</td></tr><tr><td>LSTMAVG</td><td>54.6</td><td>69.6</td></tr><tr><td>LSTMGAUSSIAN</td><td>55.5</td><td>67.0</td></tr><tr><td>WLO</td><td>77.4</td><td>70.1</td></tr></table>", |
| "type_str": "table", |
| "html": null, |
| "text": "" |
| }, |
| "TABREF4": { |
| "num": null, |
| "content": "<table/>", |
| "type_str": "table", |
| "html": null, |
| "text": "Accuracy (%) for the specificity News test set, in both the original and length normalized conditions." |
| }, |
| "TABREF6": { |
| "num": null, |
| "content": "<table><tr><td>6 Analysis</td></tr><tr><td>6.1 Sentence Entailment</td></tr><tr><td>Vilnis and McCallum (2014) explored whether</td></tr><tr><td>their Gaussian word entropies captured the lexi-</td></tr><tr><td>cal entailment relationship. Here we analyze the</td></tr><tr><td>extent to which our representations capture sen-</td></tr><tr><td>tential entailment.</td></tr></table>", |
| "type_str": "table", |
| "html": null, |
| "text": "Percentage of cases in which hypothesis has larger entropy (or smaller norm for non-probabilistic models) than premise for equal-length sentence pairs in the SNLI test set. In this setting, GloVe and ELMo would give the same results under either SUM or AVG." |
| }, |
| "TABREF8": { |
| "num": null, |
| "content": "<table><tr><td>: Examples showing top-20 lists of large-norm</td></tr><tr><td>or small-norm words ranked based on small absolute</td></tr><tr><td>entropy or small entropy in WLO.</td></tr></table>", |
| "type_str": "table", |
| "html": null, |
| "text": "" |
| }, |
| "TABREF9": { |
| "num": null, |
| "content": "<table><tr><td colspan=\"2\">With Prior Without Prior</td></tr><tr><td>Acc. F1 Acc.</td><td>F1</td></tr><tr><td>WLO 77.4 78.4 67.9</td><td>68.2</td></tr></table>", |
| "type_str": "table", |
| "html": null, |
| "text": "Examples of most general and specific sentences for selected lengths (* = mapped to unknown symbol)." |
| }, |
| "TABREF10": { |
| "num": null, |
| "content": "<table><tr><td/><td>STS Benchmark</td></tr><tr><td>WORDAVG</td><td>73.4</td></tr><tr><td>LSTMAVG</td><td>73.6</td></tr><tr><td>LSTMGAUSSIAN</td><td>74.3</td></tr><tr><td>WLO</td><td>73.7</td></tr></table>", |
| "type_str": "table", |
| "html": null, |
| "text": "Accuracy (%) and F 1 score (%) for specificity News test set with and without prior regularization." |
| }, |
| "TABREF11": { |
| "num": null, |
| "content": "<table/>", |
| "type_str": "table", |
| "html": null, |
| "text": "Pearson correlation (%) for STS benchmark test set. Highest number is in bold." |
| } |
| } |
| } |
| } |