| { |
| "paper_id": "P19-1011", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T08:33:24.994482Z" |
| }, |
| "title": "Learning Compressed Sentence Representations for On-Device Text Processing", |
| "authors": [ |
| { |
| "first": "Dinghan", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Stanford University", |
| "location": {} |
| }, |
| "email": "dinghan.shen@duke.edu" |
| }, |
| { |
| "first": "Pengyu", |
| "middle": [], |
| "last": "Cheng", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Stanford University", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Dhanasekar", |
| "middle": [], |
| "last": "Sundararaman", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Stanford University", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Xinyuan", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Stanford University", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Qian", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Stanford University", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Meng", |
| "middle": [], |
| "last": "Tang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Stanford University", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Asli", |
| "middle": [], |
| "last": "Celikyilmaz", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Stanford University", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Lawrence", |
| "middle": [], |
| "last": "Carin", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Stanford University", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Duke", |
| "middle": [], |
| "last": "University", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Stanford University", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Microsoft", |
| "middle": [], |
| "last": "Research", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Stanford University", |
| "location": {} |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Vector representations of sentences, trained on massive text corpora, are widely used as generic sentence embeddings across a variety of NLP problems. The learned representations are generally assumed to be continuous and real-valued, giving rise to a large memory footprint and slow retrieval speed, which hinders their applicability to low-resource (memory and computation) platforms, such as mobile devices. In this paper, we propose four different strategies to transform continuous and generic sentence embeddings into a binarized form, while preserving their rich semantic information. The introduced methods are evaluated across a wide range of downstream tasks, where the binarized sentence embeddings are demonstrated to degrade performance by only about 2% relative to their continuous counterparts, while reducing the storage requirement by over 98%. Moreover, with the learned binary representations, the semantic relatedness of two sentences can be evaluated by simply calculating their Hamming distance, which is more computational efficient compared with the inner product operation between continuous embeddings. Detailed analysis and case study further validate the effectiveness of proposed methods.", |
| "pdf_parse": { |
| "paper_id": "P19-1011", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Vector representations of sentences, trained on massive text corpora, are widely used as generic sentence embeddings across a variety of NLP problems. The learned representations are generally assumed to be continuous and real-valued, giving rise to a large memory footprint and slow retrieval speed, which hinders their applicability to low-resource (memory and computation) platforms, such as mobile devices. In this paper, we propose four different strategies to transform continuous and generic sentence embeddings into a binarized form, while preserving their rich semantic information. The introduced methods are evaluated across a wide range of downstream tasks, where the binarized sentence embeddings are demonstrated to degrade performance by only about 2% relative to their continuous counterparts, while reducing the storage requirement by over 98%. Moreover, with the learned binary representations, the semantic relatedness of two sentences can be evaluated by simply calculating their Hamming distance, which is more computational efficient compared with the inner product operation between continuous embeddings. Detailed analysis and case study further validate the effectiveness of proposed methods.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Learning general-purpose sentence representations from large training corpora has received widespread attention in recent years. The learned sentence embeddings can encapsulate rich prior knowledge of natural language, which has been demonstrated to facilitate a variety of downstream tasks (without fine-tuning the encoder weights). The generic sentence embeddings can be trained either in an unsupervised manner (Kiros et al., 2015; Hill et al., 2016; Jernite et al., 2017; Gan * Equal contribution. et al., 2017; Logeswaran and Lee, 2018; Pagliardini et al., 2018) , or with supervised tasks such as paraphrase identification (Wieting et al., 2016) , natural language inference (Conneau et al., 2017) , discourse relation classification (Nie et al., 2017) , machine translation (Wieting and Gimpel, 2018) , etc.", |
| "cite_spans": [ |
| { |
| "start": 414, |
| "end": 434, |
| "text": "(Kiros et al., 2015;", |
| "ref_id": null |
| }, |
| { |
| "start": 435, |
| "end": 453, |
| "text": "Hill et al., 2016;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 454, |
| "end": 475, |
| "text": "Jernite et al., 2017;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 476, |
| "end": 515, |
| "text": "Gan * Equal contribution. et al., 2017;", |
| "ref_id": null |
| }, |
| { |
| "start": 516, |
| "end": 541, |
| "text": "Logeswaran and Lee, 2018;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 542, |
| "end": 567, |
| "text": "Pagliardini et al., 2018)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 629, |
| "end": 651, |
| "text": "(Wieting et al., 2016)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 681, |
| "end": 703, |
| "text": "(Conneau et al., 2017)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 740, |
| "end": 758, |
| "text": "(Nie et al., 2017)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 781, |
| "end": 807, |
| "text": "(Wieting and Gimpel, 2018)", |
| "ref_id": "BIBREF34" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Significant effort has been devoted to designing better training objectives for learning sentence embeddings. However, prior methods typically assume that the general-purpose sentence representations are continuous and real-valued. However, this assumption is sub-optimal from the following perspectives: i) the sentence embeddings require large storage or memory footprint; ii) it is computationally expensive to retrieve semanticallysimilar sentences, since every sentence representation in the database needs to be compared, and the inner product operation is computationally involved. These two disadvantages hinder the applicability of generic sentence representations to mobile devices, where a relatively tiny memory footprint and low computational capacity are typically available (Ravi and Kozareva, 2018) .", |
| "cite_spans": [ |
| { |
| "start": 789, |
| "end": 814, |
| "text": "(Ravi and Kozareva, 2018)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we aim to mitigate the above issues by binarizing the continuous sentence embeddings. Consequently, the embeddings require much smaller footprint, and similar sentences can be obtained by simply selecting those with closest binary codes in the Hamming space (Kiros and Chan, 2018) . One simple idea is to naively binarize the continuous vectors by setting a hard threshold. However, we find that this strategy leads to significant performance drop in the empirical results. Besides, the dimension of the binary sentence embeddings cannot be flexibly chosen with this strategy, further limiting the practice use of the direct binarization method.", |
| "cite_spans": [ |
| { |
| "start": 273, |
| "end": 295, |
| "text": "(Kiros and Chan, 2018)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this regard, we propose three alternative strategies to parametrize the transformation from pre-trained generic continuous embeddings to their binary forms. Our exploration spans from simple operations, such as a random projection, to deep neural network models, such as a regularized autoencoder. Particularly, we introduce a semantic-preserving objective, which is augmented with the standard autoenoder architecture to encourage abstracting informative binary codes. InferSent (Conneau et al., 2017) is employed as the testbed sentence embeddings in our experiments, but the binarization schemes proposed here can easily be extended to other pretrained general-purpose sentence embeddings. We evaluate the quality of the learned general-purpose binary representations using the SentEval toolkit (Conneau et al., 2017) . It is observed that the inferred binary codes successfully maintain the semantic features contained in the continuous embeddings, and only lead to around 2% performance drop on a set of downstream NLP tasks, while requiring merely 1.5% memory footprint of their continuous counterparts.", |
| "cite_spans": [ |
| { |
| "start": 483, |
| "end": 505, |
| "text": "(Conneau et al., 2017)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 801, |
| "end": 823, |
| "text": "(Conneau et al., 2017)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Moreover, on several sentence matching benchmarks, we demonstrate that the relatedness between a sentence pair can be evaluated by simply calculating the Hamming distance between their binary codes, which perform on par with or even superior than measuring the cosine similarity between continuous embeddings (see Table 1). Note that computing the Hamming distance is much more computationally efficient than the inner product operation in a continuous space. We further perform a K-nearest neighbor sentence retrieval experiment on the SNLI dataset (Bowman et al., 2015) , and show that those semanticallysimilar sentences can indeed be efficiently retrieved with off-the-shelf binary sentence representations. Summarizing, our contributions in this paper are as follows:", |
| "cite_spans": [ |
| { |
| "start": 550, |
| "end": 571, |
| "text": "(Bowman et al., 2015)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "i) to the best of our knowledge, we conduct the first systematic exploration on learning general-purpose binarized (memory-efficient) sentence representations, and four different strategies are proposed;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "ii) an autoencoder architecture with a carefullydesigned semantic-preserving loss exhibits strong empirical results on a set of downstream NLP tasks;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "iii) more importantly, we demonstrate, on several sentence-matching datasets, that simply evaluating the Hamming distance over binary repre-sentations performs on par or even better than calculating the cosine similarity between their continuous counterparts (which is less computationallyefficient).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Sentence representations pre-trained from a large amount of data have been shown to be effective when transferred to a wide range of downstream tasks. Prior work along this line can be roughly divided into two categories: i) pre-trained models that require fine-tuning on the specific transferring task (Dai and Le, 2015; Ruder and Howard, 2018; Radford et al., 2018; Devlin et al., 2018; Cer et al., 2018) ; ii) methods that extract general-purpose sentence embeddings, which can be effectively applied to downstream NLP tasks without finetuning the encoder parameters (Kiros et al., 2015; Hill et al., 2016; Jernite et al., 2017; Gan et al., 2017; Adi et al., 2017; Logeswaran and Lee, 2018; Pagliardini et al., 2018; Tang and de Sa, 2018) . Our proposed methods belong to the second category and provide a generic and easy-to-use encoder to extract highly informative sentence representations. However, our work is unique since the embeddings inferred from our models are binarized and compact, and thus possess the advantages of small memory footprint and much faster sentence retrieval.", |
| "cite_spans": [ |
| { |
| "start": 303, |
| "end": 321, |
| "text": "(Dai and Le, 2015;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 322, |
| "end": 345, |
| "text": "Ruder and Howard, 2018;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 346, |
| "end": 367, |
| "text": "Radford et al., 2018;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 368, |
| "end": 388, |
| "text": "Devlin et al., 2018;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 389, |
| "end": 406, |
| "text": "Cer et al., 2018)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 570, |
| "end": 590, |
| "text": "(Kiros et al., 2015;", |
| "ref_id": null |
| }, |
| { |
| "start": 591, |
| "end": 609, |
| "text": "Hill et al., 2016;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 610, |
| "end": 631, |
| "text": "Jernite et al., 2017;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 632, |
| "end": 649, |
| "text": "Gan et al., 2017;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 650, |
| "end": 667, |
| "text": "Adi et al., 2017;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 668, |
| "end": 693, |
| "text": "Logeswaran and Lee, 2018;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 694, |
| "end": 719, |
| "text": "Pagliardini et al., 2018;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 720, |
| "end": 741, |
| "text": "Tang and de Sa, 2018)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Learning memory-efficient embeddings with deep neural networks has attracted substantial attention recently. One general strategy towards this goal is to extract discrete or binary data representations (Jang et al., 2016; Shu and Nakayama, 2017; Dai et al., 2017; Chen et al., 2018; Shen et al., 2018; Tissier et al., 2019) . Binarized embeddings are especially attractive because they are more memory-efficient (relative to discrete embeddings), and they also enjoy the advantages of fast retrieval based upon a Hamming distance calculation. Previous work along this line in NLP has mainly focused on learning compact representations at the word-level (Shu and Nakayama, 2017; Chen et al., 2018; Tissier et al., 2019) , while much less effort has been devoted to extracting binarized embeddings at the sentence-level. Our work aims to bridge this gap, and serves as an initial attempt to facilitate the deployment of state-of-the-art sentence embeddings on on-device mobile applications.", |
| "cite_spans": [ |
| { |
| "start": 202, |
| "end": 221, |
| "text": "(Jang et al., 2016;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 222, |
| "end": 245, |
| "text": "Shu and Nakayama, 2017;", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 246, |
| "end": 263, |
| "text": "Dai et al., 2017;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 264, |
| "end": 282, |
| "text": "Chen et al., 2018;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 283, |
| "end": 301, |
| "text": "Shen et al., 2018;", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 302, |
| "end": 323, |
| "text": "Tissier et al., 2019)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 653, |
| "end": 677, |
| "text": "(Shu and Nakayama, 2017;", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 678, |
| "end": 696, |
| "text": "Chen et al., 2018;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 697, |
| "end": 718, |
| "text": "Tissier et al., 2019)", |
| "ref_id": "BIBREF30" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Our work is also related to prior research on semantic hashing, which aims to learn binary text embeddings specifically for the information retrieval task (Salakhutdinov and Hinton, 2009; Zhang et al., 2010; Wang et al., 2014; Xu et al., 2015; Shen et al., 2018) . However, these methods are typically trained and evaluated on documents that belong to a specific domain, and thus cannot serve as generic binary sentence representation applicable to a wide variety of NLP taks. In contrast, our model is trained on large corpora and seeks to provide general-purpose binary representations that can be leveraged for various application scenarios.", |
| "cite_spans": [ |
| { |
| "start": 155, |
| "end": 187, |
| "text": "(Salakhutdinov and Hinton, 2009;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 188, |
| "end": 207, |
| "text": "Zhang et al., 2010;", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 208, |
| "end": 226, |
| "text": "Wang et al., 2014;", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 227, |
| "end": 243, |
| "text": "Xu et al., 2015;", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 244, |
| "end": 262, |
| "text": "Shen et al., 2018)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We aim to produce compact and binarized representations from continuous sentence embeddings, and preserve the associated semantic information. Let x and f denote, respectively, an input sentence and the function defined by a pre-trained generalpurpose sentence encoder. Thus, f (x) represents the continuous embeddings extracted by the encoder. The goal of our model is to learn a universal transformation g that can convert f (x) to highly informative binary sentence representations, i.e., g(f (x)), which can be used as generic features for a collection of downstream tasks. We explore four strategies to parametrize the transformation g.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Proposed Approach", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We use h and b to denote the continuous and binary sentence embeddings, respectively, and L denotes the dimension of h. The first method to binarize the continuous representations is to simply convert each dimension to either 0 or 1 based on a hard threshold. This strategy requires no training and directly operates on pre-trained continuous embeddings. Suppose s is the hard threshold, we have, for i = 1, 2, ......, L:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hard Threshold", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "b (i) = 1 h (i) >s = sign(h (i) \u2212 s) + 1 2 ,", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Hard Threshold", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "One potential issue of this direct binarization method is that the information contained in the continuous representations may be largely lost, since there is no training objective encouraging the preservation of semantic information in the produced binary codes (Shen et al., 2018) . Another disadvantage is that the length of the resulting binary code must be the same as the original continuous representation, and can not be flexibly chosen. In practice, however, we may want to learn shorter binary embeddings to save more memory footprint or computation.", |
| "cite_spans": [ |
| { |
| "start": 263, |
| "end": 282, |
| "text": "(Shen et al., 2018)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hard Threshold", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "To tackle the limitation of the above direct binarization method, we consider an alternative strategy that requires no training either: simply applying a random projection over the pre-trained continuous representations. Wieting and Kiela (2018) has shown that random sentence encoders can effectively construct universal sentence embeddings from word vectors, while possessing the flexibility of adaptively altering the embedding dimensions.", |
| "cite_spans": [ |
| { |
| "start": 221, |
| "end": 245, |
| "text": "Wieting and Kiela (2018)", |
| "ref_id": "BIBREF35" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Random Projection", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Here, we are interested in exploring whether a random projection would also work well while transforming continuous sentence representations into their binary counterparts. We randomly initialize a matrix W \u2208 R D\u00d7L , where D denotes the dimension of the resulting binary representations. Inspired by the standard initialization heuristic employed in (Glorot and Bengio, 2010; Wieting and Kiela, 2018) , the values of the matrix are initialized as sampled uniformly. For i = 1, 2, . . . , D and j = 1, 2, . . . , L, we have:", |
| "cite_spans": [ |
| { |
| "start": 350, |
| "end": 375, |
| "text": "(Glorot and Bengio, 2010;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 376, |
| "end": 400, |
| "text": "Wieting and Kiela, 2018)", |
| "ref_id": "BIBREF35" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Random Projection", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "W i,j \u223c Uniform(\u2212 1 \u221a D , 1 \u221a D ),", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Random Projection", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "After converting the continuous sentence embeddings to the desired dimension D with the matrix randomly initialized above, we further apply the operation in (1) to binarize it into the discrete/compact form. The dimension D can be set arbitrarily with this approach, which is easily applicable to any pre-trained sentence embeddings (since no training is needed). This strategy is related to the Locality-Sensitive Hashing (LSH) for inferring binary embeddings (Van Durme and Lall, 2010).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Random Projection", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We also consider an alternative strategy to adaptively choose the dimension of the resulting binary representations. Specifically, Principal Component Analysis (PCA) is utilized to reduce the dimensionality of pre-trained continuous embeddings. Given a set of sentences {x i } N i=1 and their corresponding continuous embeddings {h i } N i=1 \u2282 R L , we learn a projection matrix to reduce the embedding dimensions while keeping the embeddings distinct as much as possible. After centralizing the embeddings as", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "h i = h i \u2212 1 N N i=1 h i , the data, as < l a t e x i t s h a 1 _ b a s e 6 4 = \" J x F U R W U H 6 R + v J i r w M s 9 s g 7 W r H j U = \" > A A A B + H i c b V B N S w M x E J 2 t X 7 V + V T 1 6 C R b B U 8 m K o M e i F 4 9 V 7 A e 0 S 8 m m 2 T Y 0 y S 5 J V q h L / 4 F X P X s T r / 4 b j / 4 T 0 3 Y P t v X B w O O 9 G W b m h Y n g x m L 8 7 R X W 1 j c 2 t 4 r b p Z 3 d v f 2 D 8 u F R 0 8 S p p q x B Y x H r d k g M E 1 y x h u V W s H a i G Z G h Y K 1 w d D v 1 W 0 9 M G x 6 r R z t O W C D J Q P G I U 2 K d 9 I B L v X I F V / E M a J X 4 O a l A j n q v / N P t x z S V T F k q i D E d H y c 2 y I i 2 n A o 2 K X V T w x J C R 2 T A O o 4 q I p k J s t m l E 3 T m l D 6 K Y u 1 K W T R T / 0 5 k R B o z l q H r l M Q O z b I 3 F f / z O q m N r o O M q y S 1 T N H 5 o i g V y M Z o + j b q c 8 2 o F W N H C N X c 3 Y r o k G h C r Q t n Y U s o J y 4 T f z m B V d K 8 q P q 4 6 t 9 f V m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4 A V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "e 4 c 1 7 9 t 6 9 D + 9 z 3 l r w 8 p l j W I D 3 9 Q t O 0 p N e < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" J x F U R W U H 6 R + v J i r w M s 9 s g 7 W r H j U = \" > A", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "A A B + H i c b V B N S w M x E J 2 t X 7 V + V T 1 6 C R b B U 8 m K o M e i F 4 9 V 7 A e 0 S 8 m m 2 T Y 0 y S 5 J V q h L / 4 F X P X s T r / 4 b j / 4 T 0 3 Y P t v X B w O O 9 G W b m h Y n g x m L 8 7 R X W 1 j c 2 t 4 r b p Z 3 d v f 2 D 8 u F R 0 8 S p p q x B Y x H r d k g M E 1 y x h u V W s H a i G Z G h Y K 1 w d D v 1 W 0 9 M G x 6 r R z t O W C D J Q P G I U 2 K d 9 I B L v X I F V / E M a J X 4 O a l A j n q v / N P t x z S V T F k q i D E d H y c 2 y I i 2 n A o 2 K X V T w x J C R 2 T A O o 4 q I p k J s t m l E 3 T m l D 6 K Y u 1 K W T R T / 0 5 k R B o z l q H r l M Q O z b I 3 F f / z O q m N r o O M q y S 1 T N H 5 o i g V y M Z o + j b q c 8 2 o F W N H C N X c 3 Y r o k G h C r Q t n Y U s o J y 4 T f z m B V d K 8 q P q 4 6 t 9 f V m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4 A V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "e 4 c 1 7 9 t 6 9 D + 9 z 3 l r w 8 p l j W I D 3 9 Q t O 0 p N e < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" J x F U R W U H 6 R + v J i r w M s 9 s g 7 W r H j U = \" > A", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "A A B + H i c b V B N S w M x E J 2 t X 7 V + V T 1 6 C R b B U 8 m K o M e i F 4 9 V 7 A e 0 S 8 m m 2 T Y 0 y S 5 J V q h L / 4 F X P X s T r / 4 b j / 4 T 0 3 Y P t v X B w O O 9 G W b m h Y n g x m L 8 7 R X W 1 j c 2 t 4 r b p Z 3 d v f 2 D 8 u F R 0 8 S p p q x B Y x H r d k g M E 1 y x h u V W s H a i G Z G h Y K 1 w d D v 1 W 0 9 M G x 6 r R z t O W C D J Q P G I U 2 K d 9 I B L v X I F V / E M a J X 4 O a l A j n q v / N P t x z S V T F k q i D E d H y c 2 y I i 2 n A o 2 K X V T w x J C R 2 T A O o 4 q I p k J s t m l E 3 T m l D 6 K Y u 1 K W T R T / 0 5 k R B o z l q H r l M Q O z b I 3 F f / z O q m N r o O M q y S 1 T N H 5 o i g V y M Z o + j b q c 8 2 o F W N H C N X c 3 Y r o k G h C r Q t n Y U s o J y 4 T f z m B V d K 8 q P q 4 6 t 9 f V m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4 A V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "e 4 c 1 7 9 t 6 9 D + 9 z 3 l r w 8 p l j W I D 3 9 Q t O 0 p N e < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" J x F U R W U H 6 R + v J i r w M s 9 s g 7 W r H j U = \" > A", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "A A B + H i c b V B N S w M x E J 2 t X 7 V + V T 1 6 C R b B U 8 m K o M e i F 4 9 V 7 A e 0 S 8 m m 2 T Y 0 y S 5 J V q h L / 4 F X P X s T r / 4 b j / 4 T 0 3 Y P t v X B w O O 9 G W b m h Y n g x m L 8 7 R X W 1 j c 2 t 4 r b p Z 3 d v f 2 D 8 u F R 0 8 S p p q x B Y x H r d k g M E 1 y x h u V W s H a i G Z G h Y K 1 w d D v 1 W 0 9 M G x 6 r R z t O W C D J Q P G I U 2 K d 9 I B L v X I F V / E M a J X 4 O a l A j n q v / N P t x z S V T F k q i D E d H y c 2 y I i 2 n A o 2 K X V T w x J C R 2 T A O o 4 q I p k J s t m l E 3 T m l D 6 K Y u 1 K W T R T / 0 5 k R B o z l q H r l M Q O z b I 3 F f / z O q m N r o O M q y S 1 T N H 5 o i g V y M Z o + j b q c 8 2 o F W N H C N X c 3 Y r o k G h C r Q t n Y U s o J y 4 T f z m B V d K 8 q P q 4 6 t 9 f V m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4 A V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "e 4 c 1 7 9 t 6 9 D + 9 z 3 l r w 8 p l j W I D 3 9 Q t O 0 p N e < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" T L h i a f y z + 7 k V W 7 h d y Q 9 4 i j 1 7 7 M 0 = \" > A", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "A A B + X i c b V B N S w M x E J 2 t X 3 X 9 q n r 0 E i y C p 7 I R Q Y 9 F L x 4 r 2 l p o l 5 J N s 2 1 o k l 2 S r F C W / g S v e v Y m X v 0 1 H v 0 n p u 0 e b O u D g c d 7 M 8 z M i 1 L B j Q 2 C b 6 + 0 t r 6 x u V X e 9 n d 2 9 / Y P K o d H L Z N k m r I m T U S i 2 x E x T H D F m p Z b w d q p Z k R G g j 1 F o 9 u p / / T M t O G J e r T j l I W S D B S P O S X W S Q / Y 9 3 u V a l A L Z k C r B B e k C g U a v c p P t 5 / Q T D J l q S D G d H C Q 2 j A n 2 n I q 2 M T v Z o a l h I 7 I g H U c V U Q y E + a z U y f o z C l 9 F C f a l b J o p v 6 d y I k 0 Z i w j 1 y m J H Z p l b y r + 5 3 U y G 1 + H O V d p Z p m i 8 0 V x J p B N 0 P R v 1 O e a U S v G j h C q u b s V 0 S H R h F q X z s K W S E 5 c J n g 5 g V X S u q j h o I b v L 6 v 1 m y K d M p z A K Z w D h i u o w x 0 0 o A k U B v A C r / D m 5 d 6 7 9 + F 9 z l t L X j F z D A v w v n 4 B h V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "S T c w = = < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" T L h i a f y z + 7 k V W 7 h d y Q 9 4 i j 1 7 7 M 0 = \" > A", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "A A B + X i c b V B N S w M x E J 2 t X 3 X 9 q n r 0 E i y C p 7 I R Q Y 9 F L x 4 r 2 l p o l 5 J N s 2 1 o k l 2 S r F C W / g S v e v Y m X v 0 1 H v 0 n p u 0 e b O u D g c d 7 M 8 z M i 1 L B j Q 2 C b 6 + 0 t r 6 x u V X e 9 n d 2 9 / Y P K o d H L Z N k m r I m T U S i 2 x E x T H D F m p Z b w d q p Z k R G g j 1 F o 9 u p / / T M t O G J e r T j l I W S D B S P O S X W S Q / Y 9 3 u V a l A L Z k C r B B e k C g U a v c p P t 5 / Q T D J l q S D G d H C Q 2 j A n 2 n I q 2 M T v Z o a l h I 7 I g H U c V U Q y E + a z U y f o z C l 9 F C f a l b J o p v 6 d y I k 0 Z i w j 1 y m J H Z p l b y r + 5 3 U y G 1 + H O V d p Z p m i 8 0 V x J p B N 0 P R v 1 O e a U S v G j h C q u b s V 0 S H R h F q X z s K W S E 5 c J n g 5 g V X S u q j h o I b v L 6 v 1 m y K d M p z A K Z w D h i u o w x 0 0 o A k U B v A C r / D m 5 d 6 7 9 + F 9 z l t L X j F z D A v w v n 4 B h V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "S T c w = = < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" T L h i a f y z + 7 k V W 7 h d y Q 9 4 i j 1 7 7 M 0 = \" > A", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "A A B + X i c b V B N S w M x E J 2 t X 3 X 9 q n r 0 E i y C p 7 I R Q Y 9 F L x 4 r 2 l p o l 5 J N s 2 1 o k l 2 S r F C W / g S v e v Y m X v 0 1 H v 0 n p u 0 e b O u D g c d 7 M 8 z M i 1 L B j Q 2 C b 6 + 0 t r 6 x u V X e 9 n d 2 9 / Y P K o d H L Z N k m r I m T U S i 2 x E x T H D F m p Z b w d q p Z k R G g j 1 F o 9 u p / / T M t O G J e r T j l I W S D B S P O S X W S Q / Y 9 3 u V a l A L Z k C r B B e k C g U a v c p P t 5 / Q T D J l q S D G d H C Q 2 j A n 2 n I q 2 M T v Z o a l h I 7 I g H U c V U Q y E + a z U y f o z C l 9 F C f a l b J o p v 6 d y I k 0 Z i w j 1 y m J H Z p l b y r + 5 3 U y G 1 + H O V d p Z p m i 8 0 V x J p B N 0 P R v 1 O e a U S v G j h C q u b s V 0 S H R h F q X z s K W S E 5 c J n g 5 g V X S u q j h o I b v L 6 v 1 m y K d M p z A K Z w D h i u o w x 0 0 o A k U B v A C r / D m 5 d 6 7 9 + F 9 z l t L X j F z D A v w v n 4 B h V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "S T c w = = < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" T L h i a f y z + 7 k V W 7 h d y Q 9 4 i j 1 7 7 M 0 = \" > A", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "A A B + X i c b V B N S w M x E J 2 t X 3 X 9 q n r 0 E i y C p 7 I R Q Y 9 F L x 4 r 2 l p o l 5 J N s 2 1 o k l 2 S r F C W / g S v e v Y m X v 0 1 H v 0 n p u 0 e b O u D g c d 7 M 8 z M i 1 L B j Q 2 C b 6 + 0 t r 6 x u V X e 9 n d 2 9 / Y P K o d H L Z N k m r I m T U S i 2 x E x T H D F m p Z b w d q p Z k R G g j 1 F o 9 u p / / T M t O G J e r T j l I W S D B S P O S X W S Q / Y 9 3 u V a l A L Z k C r B B e k C g U a v c p P t 5 / Q T D J l q S D G d H C Q 2 j A n 2 n I q 2 M T v Z o a l h I 7 I g H U c V U Q y E + a z U y f o z C l 9 F C f a l b J o p v 6 d y I k 0 Z i w j 1 y m J H Z p l b y r + 5 3 U y G 1 + H O V d p Z p m i 8 0 V x J p B N 0 P R v 1 O e a U S v G j h C q u b s V 0 S H R h F q X z s K W S E 5 c J n g 5 g V X S u q j h o I b v L 6 v 1 m y K d M p z A K Z w D h i u o w x 0 0 o A k U B v A C r / D m 5 d 6 7 9 + F 9 z l t L X j F z D A v w v n 4 B h V S T c w = = < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" J x F U R W U H 6 R + v J i r w M s 9 s g 7 W r H j U = \" > A A A B + H i c b V B N S w M x E J 2 t X 7 V + V T 1 6 C R b B U 8 m K o M e i F 4 9 V 7 A e 0 S 8 m m 2 T Y 0 y S 5 J V q h L / 4 F X P X s T r / 4 b j / 4 T 0 3 Y P t v X B w O O 9 G W b m h Y n g x m L 8 7 R X W 1 j c 2 t 4 r b p Z 3 d v f 2 D 8 u F R 0 8 S p p q x B Y x H r d k g M E 1 y x h u V W s H a i G Z G h Y K 1 w d D v 1 W 0 9 M G x 6 r R z t O W C D J Q P G I U 2 K d 9 I B L v X I F V / E M a J X 4 O a l A j n q v / N P t x z S V T F k q i D E d H y c 2 y I i 2 n A o 2 K X V T w x J C R 2 T A O o 4 q I p k J s t m l E 3 T m l D 6 K Y u 1 K W T R T / 0 5 k R B o z l q H r l M Q O z b I 3 F f / z O q m N r o O M q y S 1 T N H 5 o i g V y M Z o + j b q c 8 2 o F W N H C N X c 3 Y r o k G h C r Q t n Y U s o J y 4 T f z m B V d K 8 q P q 4 6 t 9 f V m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4 A V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "e 4 c 1 7 9 t 6 9 D + 9 z 3 l r w 8 p l j W I D 3 9 Q t O 0 p N e < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" J x F U R W U H 6 R + v J i r w M s 9 s g 7 W r H j U = \" > A", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "A A B + H i c b V B N S w M x E J 2 t X 7 V + V T 1 6 C R b B U 8 m K o M e i F 4 9 V 7 A e 0 S 8 m m 2 T Y 0 y S 5 J V q h L / 4 F X P X s T r / 4 b j / 4 T 0 3 Y P t v X B w O O 9 G W b m h Y n g x m L 8 7 R X W 1 j c 2 t 4 r b p Z 3 d v f 2 D 8 u F R 0 8 S p p q x B Y x H r d k g M E 1 y x h u V W s H a i G Z G h Y K 1 w d D v 1 W 0 9 M G x 6 r R z t O W C D J Q P G I U 2 K d 9 I B L v X I F V / E M a J X 4 O a l A j n q v / N P t x z S V T F k q i D E d H y c 2 y I i 2 n A o 2 K X V T w x J C R 2 T A O o 4 q I p k J s t m l E 3 T m l D 6 K Y u 1 K W T R T / 0 5 k R B o z l q H r l M Q O z b I 3 F f / z O q m N r o O M q y S 1 T N H 5 o i g V y M Z o + j b q c 8 2 o F W N H C N X c 3 Y r o k G h C r Q t n Y U s o J y 4 T f z m B V d K 8 q P q 4 6 t 9 f V m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4 A V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "e 4 c 1 7 9 t 6 9 D + 9 z 3 l r w 8 p l j W I D 3 9 Q t O 0 p N e < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" J x F U R W U H 6 R + v J i r w M s 9 s g 7 W r H j U = \" > A", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "A A B + H i c b V B N S w M x E J 2 t X 7 V + V T 1 6 C R b B U 8 m K o M e i F 4 9 V 7 A e 0 S 8 m m 2 T Y 0 y S 5 J V q h L / 4 F X P X s T r / 4 b j / 4 T 0 3 Y P t v X B w O O 9 G W b m h Y n g x m L 8 7 R X W 1 j c 2 t 4 r b p Z 3 d v f 2 D 8 u F R 0 8 S p p q x B Y x H r d k g M E 1 y x h u V W s H a i G Z G h Y K 1 w d D v 1 W 0 9 M G x 6 r R z t O W C D J Q P G I U 2 K d 9 I B L v X I F V / E M a J X 4 O a l A j n q v / N P t x z S V T F k q i D E d H y c 2 y I i 2 n A o 2 K X V T w x J C R 2 T A O o 4 q I p k J s t m l E 3 T m l D 6 K Y u 1 K W T R T / 0 5 k R B o z l q H r l M Q O z b I 3 F f / z O q m N r o O M q y S 1 T N H 5 o i g V y M Z o + j b q c 8 2 o F W N H C N X c 3 Y r o k G h C r Q t n Y U s o J y 4 T f z m B V d K 8 q P q 4 6 t 9 f V m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4 A V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "e 4 c 1 7 9 t 6 9 D + 9 z 3 l r w 8 p l j W I D 3 9 Q t O 0 p N e < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" J", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "x F U R W U H 6 R + v J i r w M s 9 s g 7 W r H j U = \" > A A A B + H i c b V B N S w M x E J 2 t X 7 V + V T 1 6 C R b B U 8 m K o M e i F 4 9 V 7 A e 0 S 8 m m 2 T Y 0 y S 5 J V q h L / 4 F X P X s T r / 4 b j / 4 T 0 3 Y P t v X B w O O 9 G W b m h Y n g x m L 8 7 R X W 1 j c 2 t 4 r b p Z 3 d v f 2 D 8 u F R 0 8 S p p q x B Y x H r d k g M E 1 y x h u V W s H a i G Z G h Y K 1 w d D v 1 W 0 9 M G x 6 r R z t O W C D J Q P G I U 2 K d 9 I B L v X I F V / E M a J X 4 O a l A j n q v / N P t x z S V T F k q i D E d H y c 2 y I i 2 n A o 2 K X V T w x J C R 2 T A O o 4 q I p k J s t m l E 3 T m l D 6 K Y u 1 K W T R T / 0 5 k R B o z l q H r l M Q O z b I 3 F f / z O q m N r o O M q y S 1 T N H 5 o i g V y M Z o + j b q c 8 2 o F W N H C N X c 3 Y r o k G h C r Q t n Y U s o J y 4 T f z m B V d K 8 q P q 4 6 t 9 f V m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4 A V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "e 4 c 1 7 9 t 6 9 D + 9 z 3 l r w 8 p l j W I D 3 9 Q t O 0 p N e < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" J", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "x F U R W U H 6 R + v J i r w M s 9 s g 7 W r H j U = \" > A A A B + H i c b V B N S w M x E J 2 t X 7 V + V T 1 6 C R b B U 8 m K o M e i F 4 9 V 7 A e 0 S 8 m m 2 T Y 0 y S 5 J V q h L / 4 F X P X s T r / 4 b j / 4 T 0 3 Y P t v X B w O O 9 G W b m h Y n g x m L 8 7 R X W 1 j c 2 t 4 r b p Z 3 d v f 2 D 8 u F R 0 8 S p p q x B Y x H r d k g M E 1 y x h u V W s H a i G Z G h Y K 1 w d D v 1 W 0 9 M G x 6 r R z t O W C D J Q P G I U 2 K d 9 I B L v X I F V / E M a J X 4 O a l A j n q v / N P t x z S V T F k q i D E d H y c 2 y I i 2 n A o 2 K X V T w x J C R 2 T A O o 4 q I p k J s t m l E 3 T m l D 6 K Y u 1 K W T R T / 0 5 k R B o z l q H r l M Q O z b I 3 F f / z O q m N r o O M q y S 1 T N H 5 o i g V y M Z o + j b q c 8 2 o F W N H C N X c 3 Y r o k G h C r Q t n Y U s o J y 4 T f z m B V d K 8 q P q 4 6 t 9 f V m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4 A V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "e 4 c 1 7 9 t 6 9 D + 9 z 3 l r w 8 p l j W I D 3 9 Q t O 0 p N e < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" J", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "x F U R W U H 6 R + v J i r w M s 9 s g 7 W r H j U = \" > A A A B + H i c b V B N S w M x E J 2 t X 7 V + V T 1 6 C R b B U 8 m K o M e i F 4 9 V 7 A e 0 S 8 m m 2 T Y 0 y S 5 J V q h L / 4 F X P X s T r / 4 b j / 4 T 0 3 Y P t v X B w O O 9 G W b m h Y n g x m L 8 7 R X W 1 j c 2 t 4 r b p Z 3 d v f 2 D 8 u F R 0 8 S p p q x B Y x H r d k g M E 1 y x h u V W s H a i G Z G h Y K 1 w d D v 1 W 0 9 M G x 6 r R z t O W C D J Q P G I U 2 K d 9 I B L v X I F V / E M a J X 4 O a l A j n q v / N P t x z S V T F k q i D E d H y c 2 y I i 2 n A o 2 K X V T w x J C R 2 T A O o 4 q I p k J s t m l E 3 T m l D 6 K Y u 1 K W T R T / 0 5 k R B o z l q H r l M Q O z b I 3 F f / z O q m N r o O M q y S 1 T N H 5 o i g V y M Z o + j b q c 8 2 o F W N H C N X c 3 Y r o k G h C r Q t n Y U s o J y 4 T f z m B V d K 8 q P q 4 6 t 9 f V m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4 A V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "e 4 c 1 7 9 t 6 9 D + 9 z 3 l r w 8 p l j W I D 3 9 Q t O 0 p N e < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" J", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "x F U R W U H 6 R + v J i r w M s 9 s g 7 W r H j U = \" > A A A B + H i c b V B N S w M x E J 2 t X 7 V + V T 1 6 C R b B U 8 m K o M e i F 4 9 V 7 A e 0 S 8 m m 2 T Y 0 y S 5 J V q h L / 4 F X P X s T r / 4 b j / 4 T 0 3 Y P t v X B w O O 9 G W b m h Y n g x m L 8 7 R X W 1 j c 2 t 4 r b p Z 3 d v f 2 D 8 u F R 0 8 S p p q x B Y x H r d k g M E 1 y x h u V W s H a i G Z G h Y K 1 w d D v 1 W 0 9 M G x 6 r R z t O W C D J Q P G I U 2 K d 9 I B L v X I F V / E M a J X 4 O a l A j n q v / N P t x z S V T F k q i D E d H y c 2 y I i 2 n A o 2 K X V T w x J C R 2 T A O o 4 q I p k J s t m l E 3 T m l D 6 K Y u 1 K W T R T / 0 5 k R B o z l q H r l M Q O z b I 3 F f / z O q m N r o O M q y S 1 T N H 5 o i g V y M Z o + j b q c 8 2 o F W N H C N X c 3 Y r o k G h C r Q t n Y U s o J y 4 T f z m B V d K 8 q P q 4 6 t 9 f V m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4 A V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "e 4 c 1 7 9 t 6 9 D + 9 z 3 l r w 8 p l j W I D 3 9 Q t O 0 p N e < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" J", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "x F U R W U H 6 R + v J i r w M s 9 s g 7 W r H j U = \" > A A A B + H i c b V B N S w M x E J 2 t X 7 V + V T 1 6 C R b B U 8 m K o M e i F 4 9 V 7 A e 0 S 8 m m 2 T Y 0 y S 5 J V q h L / 4 F X P X s T r / 4 b j / 4 T 0 3 Y P t v X B w O O 9 G W b m h Y n g x m L 8 7 R X W 1 j c 2 t 4 r b p Z 3 d v f 2 D 8 u F R 0 8 S p p q x B Y x H r d k g M E 1 y x h u V W s H a i G Z G h Y K 1 w d D v 1 W 0 9 M G x 6 r R z t O W C D J Q P G I U 2 K d 9 I B L v X I F V / E M a J X 4 O a l A j n q v / N P t x z S V T F k q i D E d H y c 2 y I i 2 n A o 2 K X V T w x J C R 2 T A O o 4 q I p k J s t m l E 3 T m l D 6 K Y u 1 K W T R T / 0 5 k R B o z l q H r l M Q O z b I 3 F f / z O q m N r o O M q y S 1 T N H 5 o i g V y M Z o + j b q c 8 2 o F W N H C N X c 3 Y r o k G h C r Q t n Y U s o J y 4 T f z m B V d K 8 q P q 4 6 t 9 f V m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4 A V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "e 4 c 1 7 9 t 6 9 D + 9 z 3 l r w 8 p l j W I D 3 9 Q t O 0 p N e < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" T L h i a f y z + 7 k", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "V W 7 h d y Q 9 4 i j 1 7 7 M 0 = \" > A A A B + X i c b V B N S w M x E J 2 t X 3 X 9 q n r 0 E i y C p 7 I R Q Y 9 F L x 4 r 2 l p o l 5 J N s 2 1 o k l 2 S r F C W / g S v e v Y m X v 0 1 H v 0 n p u 0 e b O u D g c d 7 M 8 z M i 1 L B j Q 2 C b 6 + 0 t r 6 x u V X e 9 n d 2 9 / Y P K o d H L Z N k m r I m T U S i 2 x E x T H D F m p Z b w d q p Z k R G g j 1 F o 9 u p / / T M t O G J e r T j l I W S D B S P O S X W S Q / Y 9 3 u V a l A L Z k C r B B e k C g U a v c p P t 5 / Q T D J l q S D G d H C Q 2 j A n 2 n I q 2 M T v Z o a l h I 7 I g H U c V U Q y E + a z U y f o z C l 9 F C f a l b J o p v 6 d y I k 0 Z i w j 1 y m J H Z p l b y r + 5 3 U y G 1 + H O V d p Z p m i 8 0 V x J p B N 0 P R v 1 O e a U S v G j h C q u b s V 0 S H R h F q X z s K W S E 5 c J n g 5 g V X S u q j h o I b v L 6 v 1 m y K d M p z A K Z w D h i u o w x 0 0 o A k U B v A C r / D m 5 d 6 7 9 + F 9 z l t L X j F z D A v w v n 4 B h V S T c w = = < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" T L h i a f y z + 7 k V W 7 h d y Q 9 4 i j 1 7 7 M 0 = \" > A A A B + X i c b V B N S w M x E J 2 t X 3 X 9 q n r 0 E i y C p 7 I R Q Y 9 F L x 4 r 2 l p o l 5 J N s 2 1 o k l 2 S r F C W / g S v e v Y m X v 0 1 H v 0 n p u 0 e b O u D g c d 7 M 8 z M i 1 L B j Q 2 C b 6 + 0 t r 6 x u V X e 9 n d 2 9 / Y P K o d H L Z N k m r I m T U S i 2 x E x T H D F m p Z b w d q p Z k R G g j 1 F o 9 u p / / T M t O G J e r T j l I W S D B S P O S X W S Q / Y 9 3 u V a l A L Z k C r B B e k C g U a v c p P t 5 / Q T D J l q S D G d H C Q 2 j A n 2 n I q 2 M T v Z o a l h I 7 I g H U c V U Q y E + a z U y f o z C l 9 F C f a l b J o p v 6 d y I k 0 Z i w j 1 y m J H Z p l b y r + 5 3 U y G 1 + H O V d p Z p m i 8 0 V x J p B N 0 P R v 1 O e a U S v G j h C q u b s V 0 S H R h F q X z s K W S E 5 c J n g 5 g V X S u q j h o I b v L 6 v 1 m y K d M p z A K Z w D h i u o w x 0 0 o A k U B v A C r / D m 5 d 6 7 9 + F 9 z l t L X j F z D A v w v n 4 B h V S T c w = = < / l a t e x i t 
> < l a t e x i t s h a 1 _ b a s e 6 4 = \" T L h i a f y z + 7 k V W 7 h d y Q 9 4 i j 1 7 7 M 0 = \" > A A A B + X i c b V B N S w M x E J 2 t X 3 X 9 q n r 0 E i y C p 7 I R Q Y 9 F L x 4 r 2 l p o l 5 J N s 2 1 o k l 2 S r F C W / g S v e v Y m X v 0 1 H v 0 n p u 0 e b O u D g c d 7 M 8 z M i 1 L B j Q 2 C b 6 + 0 t r 6 x u V X e 9 n d 2 9 / Y P K o d H L Z N k m r I m T U S i 2 x E x T H D F m p Z b w d q p Z k R G g j 1 F o 9 u p / / T M t O G J e r T j l I W S D B S P O S X W S Q / Y 9 3 u V a l A L Z k C r B B e k C g U a v c p P t 5 / Q T D J l q S D G d H C Q 2 j A n 2 n I q 2 M T v Z o a l h I 7 I g H U c V U Q y E + a z U y f o z C l 9 F C f a l b J o p v 6 d y I k 0 Z i w j 1 y m J H Z p l b y r + 5 3 U y G 1 + H O V d p Z p m i 8 0 V x J p B N 0 P R v 1 O e a U S v G j h C q u b s V 0 S H R h F q X z s K W S E 5 c J n g 5 g V X S u q j h o I b v L 6 v 1 m y K d M p z A K Z w D h i u o w x 0 0 o A k U B v A C r / D m 5 d 6 7 9 + F 9 z l t L X j F z D A v w v n 4 B h V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "S T c w = = < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" T L h i a f y z + 7 k V W 7 h d y Q 9 4 i j 1 7 7 M 0 = \" > A", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "A A B + X i c b V B N S w M x E J 2 t X 3 X 9 q n r 0 E i y C p 7 I R Q Y 9 F L x 4 r 2 l p o l 5 J N s 2 1 o k l 2 S r F C W / g S v e v Y m X v 0 1 H v 0 n p u 0 e b O u D g c d 7 M 8 z M i 1 L B j Q 2 C b 6 + 0 t r 6 x u V X e 9 n d 2 9 / Y P K o d H L Z N k m r I m T U S i 2 x E x T H D F m p Z b w d q p Z k R G g j 1 F o 9 u p / / T M t O G J e r T j l I W S D B S P O S X W S Q / Y 9 3 u V a l A L Z k C r B B e k C g U a v c p P t 5 / Q T D J l q S D G d H C Q 2 j A n 2 n I q 2 M T v Z o a l h I 7 I g H U c V U Q y E + a z U y f o z C l 9 F C f a l b J o p v 6 d y I k 0 Z i w j 1 y m J H Z p l b y r + 5 3 U y G 1 + H O V d p Z p m i 8 0 V x J p B N 0 P R v 1 O e a U S v G j h C q u b s V 0 S H R h F q X z s K W S E 5 c J n g 5 g V X S u q j h o I b v L 6 v 1 m y K d M p z A K Z w D h i u o w x 0 0 o A k U B v A C r / D m 5 d 6 7 9 + F 9 z l t L X j F z D A v w v n 4 B h V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "S T c w = = < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" T L h i a f y z + 7 k V W 7 h d y Q 9 4 i j 1 7 7 M 0 = \" > A", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "A A B + X i c b V B N S w M x E J 2 t X 3 X 9 q n r 0 E i y C p 7 I R Q Y 9 F L x 4 r 2 l p o l 5 J N s 2 1 o k l 2 S r F C W / g S v e v Y m X v 0 1 H v 0 n p u 0 e b O u D g c d 7 M 8 z M i 1 L B j Q 2 C b 6 + 0 t r 6 x u V X e 9 n d 2 9 / Y P K o d H L Z N k m r I m T U S i 2 x E x T H D F m p Z b w d q p Z k R G g j 1 F o 9 u p / / T M t O G J e r T j l I W S D B S P O S X W S Q / Y 9 3 u V a l A L Z k C r B B e k C g U a v c p P t 5 / Q T D J l q S D G d H C Q 2 j A n 2 n I q 2 M T v Z o a l h I 7 I g H U c V U Q y E + a z U y f o z C l 9 F C f a l b J o p v 6 d y I k 0 Z i w j 1 y m J H Z p l b y r + 5 3 U y G 1 + H O V d p Z p m i 8 0 V x J p B N 0 P R v 1 O e a U S v G j h C q u b s V 0 S H R h F q X z s K W S E 5 c J n g 5 g V X S u q j h o I b v L 6 v 1 m y K d M p z A K Z w D h i u o w x 0 0 o A k U B v A C r / D m 5 d 6 7 9 + F 9 z l t L X j F z D A v w v n 4 B h V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "S T c w = = < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" T L h i a f y z + 7 k V W 7 h d y Q 9 4 i j 1 7 7 M 0 = \" > A", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "A A B + X i c b V B N S w M x E J 2 t X 3 X 9 q n r 0 E i y C p 7 I R Q Y 9 F L x 4 r 2 l p o l 5 J N s 2 1 o k l 2 S r F C W / g S v e v Y m X v 0 1 H v 0 n p u 0 e b O u D g c d 7 M 8 z M i 1 L B j Q 2 C b 6 + 0 t r 6 x u V X e 9 n d 2 9 / Y P K o d H L Z N k m r I m T U S i 2 x E x T H D F m p Z b w d q p Z k R G g j 1 F o 9 u p / / T M t O G J e r T j l I W S D B S P O S X W S Q / Y 9 3 u V a l A L Z k C r B B e k C g U a v c p P t 5 / Q T D J l q S D G d H C Q 2 j A n 2 n I q 2 M T v Z o a l h I 7 I g H U c V U Q y E + a z U y f o z C l 9 F C f a l b J o p v 6 d y I k 0 Z i w j 1 y m J H Z p l b y r + 5 3 U y G 1 + H O V d p Z p m i 8 0 V x J p B N 0 P R v 1 O e a U S v G j h C q u b s V 0 S H R h F q X z s K W S E 5 c J n g 5 g V X S u q j h o I b v L 6 v 1 m y K d M p z A K Z w D h i u o w x 0 0 o A k U B v A C r / D m 5 d 6 7 9 + F 9 z l t L X j F z D A v w v n 4 B h V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "S T c w = = < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" T L h i a f y z + 7 k V W 7 h d y Q 9 4 i j 1 7 7 M 0 = \" > A", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "A A B + X i c b V B N S w M x E J 2 t X 3 X 9 q n r 0 E i y C p 7 I R Q Y 9 F L x 4 r 2 l p o l 5 J N s 2 1 o k l 2 S r F C W / g S v e v Y m X v 0 1 H v 0 n p u 0 e b O u D g c d 7 M 8 z M i 1 L B j Q 2 C b 6 + 0 t r 6 x u V X e 9 n d 2 9 / Y P K o d H L Z N k m r I m T U S i 2 x E x T H D F m p Z b w d q p Z k R G g j 1 F o 9 u p / / T M t O G J e r T j l I W S D B S P O S X W S Q / Y 9 3 u V a l A L Z k C r B B e k C g U a v c p P t 5 / Q T D J l q S D G d H C Q 2 j A n 2 n I q 2 M T v Z o a l h I 7 I g H U c V U Q y E + a z U y f o z C l 9 F C f a l b J o p v 6 d y I k 0 Z i w j 1 y m J H Z p l b y r + 5 3 U y G 1 + H O V d p Z p m i 8 0 V x J p B N 0 P R v 1 O e a U S v G j h C q u b s V 0 S H R h F q X z s K W S E 5 c J n g 5 g V X S u q j h o I b v L 6 v 1 m y K d M p z A K Z w D h i u o w x 0 0 o A k U B v A C r / D m 5 d 6 7 9 + F 9 z l t L X j F z D A v w v n 4 B h V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "S T c w = = < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" T L h i a f y z + 7 k V W 7 h d y Q 9 4 i j 1 7 7 M 0 = \" > A", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "A A B + X i c b V B N S w M x E J 2 t X 3 X 9 q n r 0 E i y C p 7 I R Q Y 9 F L x 4 r 2 l p o l 5 J N s 2 1 o k l 2 S r F C W / g S v e v Y m X v 0 1 H v 0 n p u 0 e b O u D g c d 7 M 8 z M i 1 L B j Q 2 C b 6 + 0 t r 6 x u V X e 9 n d 2 9 / Y P K o d H L Z N k m r I m T U S i 2 x E x T H D F m p Z b w d q p Z k R G g j 1 F o 9 u p / / T M t O G J e r T j l I W S D B S P O S X W S Q / Y 9 3 u V a l A L Z k C r B B e k C g U a v c p P t 5 / Q T D J l q S D G d H C Q 2 j A n 2 n I q 2 M T v Z o a l h I 7 I g H U c V U Q y E + a z U y f o z C l 9 F C f a l b J o p v 6 d y I k 0 Z i w j 1 y m J H Z p l b y r + 5 3 U y G 1 + H O V d p Z p m i 8 0 V x J p B N 0 P R v 1 O e a U S v G j h C q u b s V 0 S H R h F q X z s K W S E 5 c J n g 5 g V X S u q j h o I b v L 6 v 1 m y K d M p z A K Z w D h i u o w x 0 0 o A k U B v A C r / D m 5 d 6 7 9 + F 9 z l t L X j F z D A v w v n 4 B h V S T c w = = < / l a t e x i t > 1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "< l a t e x i t s h a 1 _ b a s e 6 4 = \" T L h i a f y z + 7 k V W 7 h d y Q 9 4 i j 1 7 7 M 0 = \" > A", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "A A B + X i c b V B N S w M x E J 2 t X 3 X 9 q n r 0 E i y C p 7 I R Q Y 9 F L x 4 r 2 l p o l 5 J N s 2 1 o k l 2 S r F C W / g S v e v Y m X v 0 1 H v 0 n p u 0 e b O u D g c d 7 M 8 z M i 1 L B j Q 2 C b 6 + 0 t r 6 x u V X e 9 n d 2 9 / Y P K o d H L Z N k m r I m T U S i 2 x E x T H D F m p Z b w d q p Z k R G g j 1 F o 9 u p / / T M t O G J e r T j l I W S D B S P O S X W S Q / Y 9 3 u V a l A L Z k C r B B e k C g U a v c p P t 5 / Q T D J l q S D G d H C Q 2 j A n 2 n I q 2 M T v Z o a l h I 7 I g H U c V U Q y E + a z U y f o z C l 9 F C f a l b J o p v 6 d y I k 0 Z i w j 1 y m J H Z p l b y r + 5 3 U y G 1 + H O V d p Z p m i 8 0 V x J p B N 0 P R v 1 O e a U S v G j h C q u b s V 0 S H R h F q X z s K W S E 5 c J n g 5 g V X S u q j h o I b v L 6 v 1 m y K d M p z A K Z w D h i u o w x 0 0 o A k U B v A C r / D m 5 d 6 7 9 + F 9 z l t L X j F z D A v w v n 4 B h V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "S T c w = = < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" T L h i a f y z + 7 k V W 7 h d y Q 9 4 i j 1 7 7 M 0 = \" > A", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "A A B + X i c b V B N S w M x E J 2 t X 3 X 9 q n r 0 E i y C p 7 I R Q Y 9 F L x 4 r 2 l p o l 5 J N s 2 1 o k l 2 S r F C W / g S v e v Y m X v 0 1 H v 0 n p u 0 e b O u D g c d 7 M 8 z M i 1 L B j Q 2 C b 6 + 0 t r 6 x u V X e 9 n d 2 9 / Y P K o d H L Z N k m r I m T U S i 2 x E x T H D F m p Z b w d q p Z k R G g j 1 F o 9 u p / / T M t O G J e r T j l I W S D B S P O S X W S Q / Y 9 3 u V a l A L Z k C r B B e k C g U a v c p P t 5 / Q T D J l q S D G d H C Q 2 j A n 2 n I q 2 M T v Z o a l h I 7 I g H U c V U Q y E + a z U y f o z C l 9 F C f a l b J o p v 6 d y I k 0 Z i w j 1 y m J H Z p l b y r + 5 3 U y G 1 + H O V d p Z p m i 8 0 V x J p B N 0 P R v 1 O e a U S v G j h C q u b s V 0 S H R h F q X z s K W S E 5 c J n g 5 g V X S u q j h o I b v L 6 v 1 m y K d M p z A K Z w D h i u o w x 0 0 o A k U B v A C r / D m 5 d 6 7 9 + F 9 z l t L X j F z D A v w v n 4 B h V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "S T c w = = < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" T L h i a f y z + 7 k V W 7 h d y Q 9 4 i j 1 7 7 M 0 = \" > A", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "A A B + X i c b V B N S w M x E J 2 t X 3 X 9 q n r 0 E i y C p 7 I R Q Y 9 F L x 4 r 2 l p o l 5 J N s 2 1 o k l 2 S r F C W / g S v e v Y m X v 0 1 H v 0 n p u 0 e b O u D g c d 7 M 8 z M i 1 L B j Q 2 C b 6 + 0 t r 6 x u V X e 9 n d 2 9 / Y P K o d H L Z N k m r I m T U S i 2 x E x T H D F m p Z b w d q p Z k R G g j 1 F o 9 u p / / T M t O G J e r T j l I W S D B S P O S X W S Q / Y 9 3 u V a l A L Z k C r B B e k C g U a v c p P t 5 / Q T D J l q S D G d H C Q 2 j A n 2 n I q 2 M T v Z o a l h I 7 I g H U c V U Q y E + a z U y f o z C l 9 F C f a l b J o p v 6 d y I k 0 Z i w j 1 y m J H Z p l b y r + 5 3 U y G 1 + H O V d p Z p m i 8 0 V x J p B N 0 P R v 1 O e a U S v G j h C q u b s V 0 S H R h F q X z s K W S E 5 c J n g 5 g V X S u q j h o I b v L 6 v 1 m y K d M p z A K Z w D h i u o w x 0 0 o A k U B v A C r / D m 5 d 6 7 9 + F 9 z l t L X j F z D A v w v n 4 B h V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "S T c w = = < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" T L h i a f y z + 7 k V W 7 h d y Q 9 4 i j 1 7 7 M 0 = \" > A", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "A A B + X i c b V B N S w M x E J 2 t X 3 X 9 q n r 0 E i y C p 7 I R Q Y 9 F L x 4 r 2 l p o l 5 J N s 2 1 o k l 2 S r F C W / g S v e v Y m X v 0 1 H v 0 n p u 0 e b O u D g c d 7 M 8 z M i 1 L B j Q 2 C b 6 + 0 t r 6 x u V X e 9 n d 2 9 / Y P K o d H L Z N k m r I m T U S i 2 x E x T H D F m p Z b w d q p Z k R G g j 1 F o 9 u p / / T M t O G J e r T j l I W S D B S P O S X W S Q / Y 9 3 u V a l A L Z k C r B B e k C g U a v c p P t 5 / Q T D J l q S D G d H C Q 2 j A n 2 n I q 2 M T v Z o a l h I 7 I g H U c V U Q y E + a z U y f o z C l 9 F C f a l b J o p v 6 d y I k 0 Z i w j 1 y m J H Z p l b y r + 5 3 U y G 1 + H O V d p Z p m i 8 0 V x J p B N 0 P R v 1 O e a U S v G j h C q u b s V 0 S H R h F q X z s K W S E 5 c J n g 5 g V X S u q j h o I b v L 6 v 1 m y K d M p z A K Z w D h i u o w x 0 0 o A k U B v A C r / D m 5 d 6 7 9 + F 9 z l t L X j F z D A v w v n 4 B h V S T c w = = < / l a t e x i t > 0 < l a t e x i t s h a 1 _ b a s e 6 4 = \" J x F U R W U H 6 R + v J i r w M s 9 s g 7 W r H j U = \" > A A A B + H i c b V B N S w M x E J 2 t X 7 V + V T 1 6 C R b B U 8 m K o M e i F 4 9 V 7 A e 0 S 8 m m 2 T Y 0 y S 5 J V q h L / 4 F X P X s T r / 4 b j / 4 T 0 3 Y P t v X B w O O 9 G W b m h Y n g x m L 8 7 R X W 1 j c 2 t 4 r b p Z 3 d v f 2 D 8 u F R 0 8 S p p q x B Y x H r d k g M E 1 y x h u V W s H a i G Z G h Y K 1 w d D v 1 W 0 9 M G x 6 r R z t O W C D J Q P G I U 2 K d 9 I B L v X I F V / E M a J X 4 O a l A j n q v / N P t x z S V T F k q i D E d H y c 2 y I i 2 n A o 2 K X V T w x J C R 2 T A O o 4 q I p k J s t m l E 3 T m l D 6 K Y u 1 K W T R T / 0 5 k R B o z l q H r l M Q O z b I 3 F f / z O q m N r o O M q y S 1 T N H 5 o i g V y M Z o + j b q c 8 2 o F W N H C N X c 3 Y r o k G h C r Q t n Y U s o J y 4 T f z m B V d K 8 q P q 4 6 t 9 f V m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4 A V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "e 4 c 1 7 9 t 6 9 D + 9 z 3 l r w 8 p l j W I D 3 9 Q t O 0 p N e < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" J x F U R W U H 6 R + v J i r w M s 9 s g 7 W r H j U = \" > A", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "A A B + H i c b V B N S w M x E J 2 t X 7 V + V T 1 6 C R b B U 8 m K o M e i F 4 9 V 7 A e 0 S 8 m m 2 T Y 0 y S 5 J V q h L / 4 F X P X s T r / 4 b j / 4 T 0 3 Y P t v X B w O O 9 G W b m h Y n g x m L 8 7 R X W 1 j c 2 t 4 r b p Z 3 d v f 2 D 8 u F R 0 8 S p p q x B Y x H r d k g M E 1 y x h u V W s H a i G Z G h Y K 1 w d D v 1 W 0 9 M G x 6 r R z t O W C D J Q P G I U 2 K d 9 I B L v X I F V / E M a J X 4 O a l A j n q v / N P t x z S V T F k q i D E d H y c 2 y I i 2 n A o 2 K X V T w x J C R 2 T A O o 4 q I p k J s t m l E 3 T m l D 6 K Y u 1 K W T R T / 0 5 k R B o z l q H r l M Q O z b I 3 F f / z O q m N r o O M q y S 1 T N H 5 o i g V y M Z o + j b q c 8 2 o F W N H C N X c 3 Y r o k G h C r Q t n Y U s o J y 4 T f z m B V d K 8 q P q 4 6 t 9 f V m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4 A V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "e 4 c 1 7 9 t 6 9 D + 9 z 3 l r w 8 p l j W I D 3 9 Q t O 0 p N e < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" J x F U R W U H 6 R + v J i r w M s 9 s g 7 W r H j U = \" > A", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "A A B + H i c b V B N S w M x E J 2 t X 7 V + V T 1 6 C R b B U 8 m K o M e i F 4 9 V 7 A e 0 S 8 m m 2 T Y 0 y S 5 J V q h L / 4 F X P X s T r / 4 b j / 4 T 0 3 Y P t v X B w O O 9 G W b m h Y n g x m L 8 7 R X W 1 j c 2 t 4 r b p Z 3 d v f 2 D 8 u F R 0 8 S p p q x B Y x H r d k g M E 1 y x h u V W s H a i G Z G h Y K 1 w d D v 1 W 0 9 M G x 6 r R z t O W C D J Q P G I U 2 K d 9 I B L v X I F V / E M a J X 4 O a l A j n q v / N P t x z S V T F k q i D E d H y c 2 y I i 2 n A o 2 K X V T w x J C R 2 T A O o 4 q I p k J s t m l E 3 T m l D 6 K Y u 1 K W T R T / 0 5 k R B o z l q H r l M Q O z b I 3 F f / z O q m N r o O M q y S 1 T N H 5 o i g V y M Z o + j b q c 8 2 o F W N H C N X c 3 Y r o k G h C r Q t n Y U s o J y 4 T f z m B V d K 8 q P q 4 6 t 9 f V m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4 A V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "e 4 c 1 7 9 t 6 9 D + 9 z 3 l r w 8 p l j W I D 3 9 Q t O 0 p N e < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" J x F U R W U H 6 R + v J i r w M s 9 s g 7 W r H j U = \" > A", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "A A B + H i c b V B N S w M x E J 2 t X 7 V + V T 1 6 C R b B U 8 m K o M e i F 4 9 V 7 A e 0 S 8 m m 2 T Y 0 y S 5 J V q h L / 4 F X P X s T r / 4 b j / 4 T 0 3 Y P t v X B w O O 9 G W b m h Y n g x m L 8 7 R X W 1 j c 2 t 4 r b p Z 3 d v f 2 D 8 u F R 0 8 S p p q x B Y x H r d k g M E 1 y x h u V W s H a i G Z G h Y K 1 w d D v 1 W 0 9 M G x 6 r R z t O W C D J Q P G I U 2 K d 9 I B L v X I F V / E M a J X 4 O a l A j n q v / N P t x z S V T F k q i D E d H y c 2 y I i 2 n A o 2 K X V T w x J C R 2 T A O o 4 q I p k J s t m l E 3 T m l D 6 K Y u 1 K W T R T / 0 5 k R B o z l q H r l M Q O z b I 3 F f / z O q m N r o O M q y S 1 T N H 5 o i g V y M Z o + j b q c 8 2 o F W N H C N X c 3 Y r o k G h C r Q t n Y U s o J y 4 T f z m B V d K 8 q P q 4 6 t 9 f V m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4 A V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "e 4 c 1 7 9 t 6 9 D + 9 z 3 l r w 8 p l j W I D 3 9 Q t O 0 p N e < / l a t e x i t > 1 < l a t e x i t s h a 1 _ b a s e 6 4 = \" T L h i a f y z + 7 k V W 7 h d y Q 9 4 i j 1 7 7 M 0 = \" > A", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "A A B + X i c b V B N S w M x E J 2 t X 3 X 9 q n r 0 E i y C p 7 I R Q Y 9 F L x 4 r 2 l p o l 5 J N s 2 1 o k l 2 S r F C W / g S v e v Y m X v 0 1 H v 0 n p u 0 e b O u D g c d 7 M 8 z M i 1 L B j Q 2 C b 6 + 0 t r 6 x u V X e 9 n d 2 9 / Y P K o d H L Z N k m r I m T U S i 2 x E x T H D F m p Z b w d q p Z k R G g j 1 F o 9 u p / / T M t O G J e r T j l I W S D B S P O S X W S Q / Y 9 3 u V a l A L Z k C r B B e k C g U a v c p P t 5 / Q T D J l q S D G d H C Q 2 j A n 2 n I q 2 M T v Z o a l h I 7 I g H U c V U Q y E + a z U y f o z C l 9 F C f a l b J o p v 6 d y I k 0 Z i w j 1 y m J H Z p l b y r + 5 3 U y G 1 + H O V d p Z p m i 8 0 V x J p B N 0 P R v 1 O e a U S v G j h C q u b s V 0 S H R h F q X z s K W S E 5 c J n g 5 g V X S u q j h o I b v L 6 v 1 m y K d M p z A K Z w D h i u o w x 0 0 o A k U B v A C r / D m 5 d 6 7 9 + F 9 z l t L X j F z D A v w v n 4 B h V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "S T c w = = < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" T L h i a f y z + 7 k V W 7 h d y Q 9 4 i j 1 7 7 M 0 = \" > A", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "A A B + X i c b V B N S w M x E J 2 t X 3 X 9 q n r 0 E i y C p 7 I R Q Y 9 F L x 4 r 2 l p o l 5 J N s 2 1 o k l 2 S r F C W / g S v e v Y m X v 0 1 H v 0 n p u 0 e b O u D g c d 7 M 8 z M i 1 L B j Q 2 C b 6 + 0 t r 6 x u V X e 9 n d 2 9 / Y P K o d H L Z N k m r I m T U S i 2 x E x T H D F m p Z b w d q p Z k R G g j 1 F o 9 u p / / T M t O G J e r T j l I W S D B S P O S X W S Q / Y 9 3 u V a l A L Z k C r B B e k C g U a v c p P t 5 / Q T D J l q S D G d H C Q 2 j A n 2 n I q 2 M T v Z o a l h I 7 I g H U c V U Q y E + a z U y f o z C l 9 F C f a l b J o p v 6 d y I k 0 Z i w j 1 y m J H Z p l b y r + 5 3 U y G 1 + H O V d p Z p m i 8 0 V x J p B N 0 P R v 1 O e a U S v G j h C q u b s V 0 S H R h F q X z s K W S E 5 c J n g 5 g V X S u q j h o I b v L 6 v 1 m y K d M p z A K Z w D h i u o w x 0 0 o A k U B v A C r / D m 5 d 6 7 9 + F 9 z l t L X j F z D A v w v n 4 B h V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "S T c w = = < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" T L h i a f y z + 7 k V W 7 h d y Q 9 4 i j 1 7 7 M 0 = \" > A A A B + X i c b V B N S w M x E J 2 t X 3 X 9 q n r 0 E i y C p 7 I R Q Y 9 F L x 4 r 2 l p o l 5 J N s 2 1 o k l 2 S r F C W / g S v e v Y m X v 0 1 H v 0 n p u 0 e b O u D g c d 7 M 8 z M i 1 L B j Q 2 C b 6 + 0 t r 6 x u V X e 9 n d 2 9 / Y P K o d H L Z N k m r I m T U S i 2 x E x T H D F m p Z b w d q p Z k R G g j 1 F o 9 u p / / T M t O G J e r T j l I W S D B S P O S X W S Q / Y 9 3 u V a l A L Z k C r B B e k C g U a v c p P t 5 / Q T D J l q S D G d H C Q 2 j A n 2 n I q 2 M T v Z o a l h I 7 I g H U c V U Q y E + a z U y f o z C l 9 F C f a l b J o p v 6 d y I k 0 Z i w j 1 y m J H Z p l b y r + 5 3 U y G 1 + H O", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "V d p Z p m i 8 0 V x J p B N 0 P R v 1 O e a U S v G j h C q u b s V 0 S H R h F q X z s K W S E 5 c J n g 5 g V X S u q j h o I b v L 6 v 1 m y K d M p z A K Z w D h i u o w x 0 0 o A k U B v A C r / D m 5 d 6 7 9 + F 9 z l t L X j F z D A v w v n 4 B h V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "S T c w = = < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" T L h i a f y z + 7 k V W 7 h d y Q 9 4 i j 1 7 7 M 0 = \" > A A A B + X i c b V B N S w M x E J 2 t X 3 X 9 q n r 0 E i y C p 7 I R Q Y 9 F L x 4 r 2 l p o l 5 J N s 2 1 o k l 2 S r F C W / g S v e v Y m X v 0 1 H v 0 n p u 0 e b O u D g c d 7 M 8 z M i 1 L B j Q 2 C b 6 + 0 t r 6 x u V X e 9 n d 2 9 / Y P K o d H L Z N k m r I m T U S i 2 x E x T H D F m p Z b w d q p Z k R G g j 1 F o 9 u p / / T M t O G J e r T j l I W S D B S P O S X W S Q / Y 9 3 u V a l A L Z k C r B B e k C g U a v c p P t 5 / Q T D J l q S D G d H C Q 2 j A n 2 n I q 2 M T v Z o a l h I 7 I g H U c V U Q y E + a z U y f o z C l 9 F C f a l b J o p v 6 d y I k 0 Z i w j 1 y m J H Z p l b y r + 5 3 U y G 1 + H O", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "V d p Z p m i 8 0 V x J p B N 0 P R v 1 O e a U S v G j h C q u b s V 0 S H R h F q X z s K W S E 5 c J n g 5 g V X S u q j h o I b v L 6 v 1 m y K d M p z A K Z w D h i u o w x 0 0 o A k U B v A C r / D m 5 d 6 7 9 + F 9 z l t L X j F z D A v w v n 4 B h V S T c w = = < / l a t e x i t > 1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "< l a t e x i t s h a 1 _ b a s e 6 4 = \" T L h i a f y z + 7 k V W 7 h d y Q 9 4 i j 1 7 7 M 0 = \" > A A A B + X i c b V B N S w M x E J 2 t X 3 X 9 q n r 0 E i y C p 7 I R Q Y 9 F L x 4 r 2 l p o l 5 J N s 2 1 o k l 2 S r F C W / g S v e v Y m X v 0 1 H v 0 n p u 0 e b O u D g c d 7 M 8 z M i 1 L B j Q 2 C b 6 + 0 t r 6 x u V X e 9 n d 2 9 / Y P K o d H L Z N k m r I m T U S i 2 x E x T H D F m p Z b w d q p Z k R G g j 1 F o 9 u p / / T M t O G J e r T j l I W S D B S P O S X W S Q / Y 9 3 u V a l A L Z k C r B B e k C g U a v c p P t 5 / Q T D J l q S D G d H C Q 2 j A n 2 n I q 2 M T v Z o a l h I 7 I g H U c V U Q y E + a z U y f o z C l 9 F C f a l b J o p v 6 d y I k 0 Z i w j 1 y m J H Z p l b y r + 5 3 U y G 1 + H O", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "V d p Z p m i 8 0 V x J p B N 0 P R v 1 O e a U S v G j h C q u b s V 0 S H R h F q X z s K W S E 5 c J n g 5 g V X S u q j h o I b v L 6 v 1 m y K d M p z A K Z w D h i u o w x 0 0 o A k U B v A C r / D m 5 d 6 7 9 + F 9 z l t L X j F z D A v w v n 4 B h V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "S T c w = = < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" T L h i a f y z + 7 k V W 7 h d y Q 9 4 i j 1 7 7 M 0 = \" > A A A B + X i c b V B N S w M x E J 2 t X 3 X 9 q n r 0 E i y C p 7 I R Q Y 9 F L x 4 r 2 l p o l 5 J N s 2 1 o k l 2 S r F C W / g S v e v Y m X v 0 1 H v 0 n p u 0 e b O u D g c d 7 M 8 z M i 1 L B j Q 2 C b 6 + 0 t r 6 x u V X e 9 n d 2 9 / Y P K o d H L Z N k m r I m T U S i 2 x E x T H D F m p Z b w d q p Z k R G g j 1 F o 9 u p / / T M t O G J e r T j l I W S D B S P O S X W S Q / Y 9 3 u V a l A L Z k C r B B e k C g U a v c p P t 5 / Q T D J l q S D G d H C Q 2 j A n 2 n I q 2 M T v Z o a l h I 7 I g H U c V U Q y E + a z U y f o z C l 9 F C f a l b J o p v 6 d y I k 0 Z i w j 1 y m J H Z p l b y r + 5 3 U y G 1 + H O", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "V d p Z p m i 8 0 V x J p B N 0 P R v 1 O e a U S v G j h C q u b s V 0 S H R h F q X z s K W S E 5 c J n g 5 g V X S u q j h o I b v L 6 v 1 m y K d M p z A K Z w D h i u o w x 0 0 o A k U B v A C r / D m 5 d 6 7 9 + F 9 z l t L X j F z D A v w v n 4 B h V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "S T c w = = < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" T L h i a f y z + 7 k V W 7 h d y Q 9 4 i j 1 7 7 M 0 = \" > A A A B + X i c b V B N S w M x E J 2 t X 3 X 9 q n r 0 E i y C p 7 I R Q Y 9 F L x 4 r 2 l p o l 5 J N s 2 1 o k l 2 S r F C W / g S v e v Y m X v 0 1 H v 0 n p u 0 e b O u D g c d 7 M 8 z M i 1 L B j Q 2 C b 6 + 0 t r 6 x u V X e 9 n d 2 9 / Y P K o d H L Z N k m r I m T U S i 2 x E x T H D F m p Z b w d q p Z k R G g j 1 F o 9 u p / / T M t O G J e r T j l I W S D B S P O S X W S Q / Y 9 3 u V a l A L Z k C r B B e k C g U a v c p P t 5 / Q T D J l q S D G d H C Q 2 j A n 2 n I q 2 M T v Z o a l h I 7 I g H U c V U Q y E + a z U y f o z C l 9 F C f a l b J o p v 6 d y I k 0 Z i w j 1 y m J H Z p l b y r + 5 3 U y G 1 + H O", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "V d p Z p m i 8 0 V x J p B N 0 P R v 1 O e a U S v G j h C q u b s V 0 S H R h F q X z s K W S E 5 c J n g 5 g V X S u q j h o I b v L 6 v 1 m y K d M p z A K Z w D h i u o w x 0 0 o A k U B v A C r / D m 5 d 6 7 9 + F 9 z l t L X j F z D A v w v n 4 B h V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "S T c w = = < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" T L h i a f y z + 7 k V W 7 h d y Q 9 4 i j 1 7 7 M 0 = \" > A A A B + X i c b V B N S w M x E J 2 t X 3 X 9 q n r 0 E i y C p 7 I R Q Y 9 F L x 4 r 2 l p o l 5 J N s 2 1 o k l 2 S r F C W / g S v e v Y m X v 0 1 H v 0 n p u 0 e b O u D g c d 7 M 8 z M i 1 L B j Q 2 C b 6 + 0 t r 6 x u V X e 9 n d 2 9 / Y P K o d H L Z N k m r I m T U S i 2 x E x T H D F m p Z b w d q p Z k R G g j 1 F o 9 u p / / T M t O G J e r T j l I W S D B S P O S X W S Q / Y 9 3 u V a l A L Z k C r B B e k C g U a v c p P t 5 / Q T D J l q S D G d H C Q 2 j A n 2 n I q 2 M T v Z o a l h I 7 I g H U c V U Q y E + a z U y f o z C l 9 F C f a l b J o p v 6 d y I k 0 Z i w j 1 y m J H Z p l b y r + 5 3 U y G 1 + H O", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "V d p Z p m i 8 0 V x J p B N 0 P R v 1 O e a U S v G j h C q u b s V 0 S H R h F q X z s K W S E 5 c J n g 5 g V X S u q j h o I b v L 6 v 1 m y K d M p z A K Z w D h i u o w x 0 0 o A k U B v A C r / D m 5 d 6 7 9 + F 9 z l t L X j F z D A v w v n 4 B h V S T c w = = < / l a t e x i t > 0", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "< l a t e x i t s h a 1 _ b a s e 6 4 = \" J x F U R W U H 6 R + v J i r w M s 9 s g 7 W r H j U = \" > A A A B + H i c b V B N S w M x E J 2 t X 7 V + V T 1 6 C R b B U 8 m K o M e i F 4 9 V 7 A e 0 S 8 m m 2 T Y 0 y S 5 J V q h L / 4 F X P X s T r / 4 b j / 4 T 0 3 Y P t", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "v X B w O O 9 G W b m h Y n g x m L 8 7 R X W 1 j c 2 t 4 r b p Z 3 d v f 2 D 8 u F R 0 8 S p p q x B Y x H r d k g M E 1 y x h u V W s H a i G Z G h Y K 1 w d D v 1 W 0 9 M G x 6 r R z t O W C D J Q P G I U 2 K d 9 I B L v X I F V / E M a J X 4 O a l A j n q v / N P t x z S V T F k q i D E d H y c 2 y I i 2 n A o 2 K X V T w x J C R 2 T A O o 4 q I p k J s t m l E 3 T m l D 6 K Y u 1 K W T R T / 0 5 k R B o z l q H r l M Q O z b I 3 F f / z O q m N r o O M q y S 1 T N H 5 o i g V y M Z o + j b q c 8 2 o F W N H C N X c 3 Y r o k G h C r Q t n Y U s o J y 4 T f z m B V d K 8 q P q 4 6 t 9 f V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Principal Component Analysis", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "A V e 4 c 1 7 9 t 6 9 D + 9 z 3 l r w 8 p l j W I D 3 9 Q t O 0 p N e < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" J x F U R W U H 6 R + v J i r w M s 9 s g 7 W r H j U = \" > A A A B + H i c b", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4", |
| "sec_num": null |
| }, |
| { |
| "text": "V B N S w M x E J 2 t X 7 V + V T 1 6 C R b B U 8 m K o M e i F 4 9 V 7 A e 0 S 8 m m 2 T Y 0 y S 5 J V q h L / 4 F X P X s T r / 4 b j / 4 T 0 3 Y P t v X B w O O 9 G W b m h Y n g x m L 8 7 R X W 1 j c 2 t 4 r b p Z 3 d v f 2 D 8 u F R 0 8 S p p q x B Y x H r d k g M E 1 y x h u V W s H a i G Z G h Y K 1 w d D v 1 W 0 9 M G x 6 r R z t O W C D J Q P G I U 2 K d 9 I B L v X I F V / E M a J X 4 O a l A j n q v / N P t x z S V T F k q i D E d H y c 2 y I i 2 n A o 2 K X V T w x J C R 2 T A O o 4 q I p k J s t m l E 3 T m l D 6 K Y u 1 K W T R T / 0 5 k R B o z l q H r l M Q O z b I 3 F f / z O q m N r o O M q y S 1 T N H 5 o i g V y M Z o + j b q c 8 2 o F W N H C N X c 3 Y r o k G h C r Q t n Y U s o J y 4 T f z m B V d K 8 q P q 4 6 t 9 f V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4", |
| "sec_num": null |
| }, |
| { |
| "text": "A V e 4 c 1 7 9 t 6 9 D + 9 z 3 l r w 8 p l j W I D 3 9 Q t O 0 p N e < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" J x F U R W U H 6 R + v J i r w M s 9 s g 7 W r H j U = \" > A A A B + H i c b", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4", |
| "sec_num": null |
| }, |
| { |
| "text": "V B N S w M x E J 2 t X 7 V + V T 1 6 C R b B U 8 m K o M e i F 4 9 V 7 A e 0 S 8 m m 2 T Y 0 y S 5 J V q h L / 4 F X P X s T r / 4 b j / 4 T 0 3 Y P t v X B w O O 9 G W b m h Y n g x m L 8 7 R X W 1 j c 2 t 4 r b p Z 3 d v f 2 D 8 u F R 0 8 S p p q x B Y x H r d k g M E 1 y x h u V W s H a i G Z G h Y K 1 w d D v 1 W 0 9 M G x 6 r R z t O W C D J Q P G I U 2 K d 9 I B L v X I F V / E M a J X 4 O a l A j n q v / N P t x z S V T F k q i D E d H y c 2 y I i 2 n A o 2 K X V T w x J C R 2 T A O o 4 q I p k J s t m l E 3 T m l D 6 K Y u 1 K W T R T / 0 5 k R B o z l q H r l M Q O z b I 3 F f / z O q m N r o O M q y S 1 T N H 5 o i g V y M Z o + j b q c 8 2 o F W N H C N X c 3 Y r o k G h C r Q t n Y U s o J y 4 T f z m B V d K 8 q P q 4 6 t 9 f V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4", |
| "sec_num": null |
| }, |
| { |
| "text": "A V e 4 c 1 7 9 t 6 9 D + 9 z 3 l r w 8 p l j W I D 3 9 Q t O 0 p N e < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" J x F U R W U H 6 R + v J i r w M s 9 s g 7 W r H j U = \" > A A A B + H i c b", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4", |
| "sec_num": null |
| }, |
| { |
| "text": "V B N S w M x E J 2 t X 7 V + V T 1 6 C R b B U 8 m K o M e i F 4 9 V 7 A e 0 S 8 m m 2 T Y 0 y S 5 J V q h L / 4 F X P X s T r / 4 b j / 4 T 0 3 Y P t v X B w O O 9 G W b m h Y n g x m L 8 7 R X W 1 j c 2 t 4 r b p Z 3 d v f 2 D 8 u F R 0 8 S p p q x B Y x H r d k g M E 1 y x h u V W s H a i G Z G h Y K 1 w d D v 1 W 0 9 M G x 6 r R z t O W C D J Q P G I U 2 K d 9 I B L v X I F V / E M a J X 4 O a l A j n q v / N P t x z S V T F k q i D E d H y c 2 y I i 2 n A o 2 K X V T w x J C R 2 T A O o 4 q I p k J s t m l E 3 T m l D 6 K Y u 1 K W T R T / 0 5 k R B o z l q H r l M Q O z b I 3 F f / z O q m N r o O M q y S 1 T N H 5 o i g V y M Z o + j b q c 8 2 o F W N H C N X c 3 Y r o k G h C r Q t n Y U s o J y 4 T f z m B V d K 8 q P q 4 6 t 9 f V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4", |
| "sec_num": null |
| }, |
| { |
| "text": "A V e 4 c 1 7 9 t 6 9 D + 9 z 3 l r w 8 p l j W I D 3 9 Q t O 0 p N e < / l a t e x i t > 1 < l a t e x i t s h a 1 _ b a s e 6 4 = \" T L h i a f y z + 7 k V W 7 h d y Q 9 4 i j 1 7 7 M 0 = \" > A", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4", |
| "sec_num": null |
| }, |
| { |
| "text": "A A B + X i c b V B N S w M x E J 2 t X 3 X 9 q n r 0 E i y C p 7 I R Q Y 9 F L x 4 r 2 l p o l 5 J N s 2 1 o k l 2 S r F C W / g S v e v Y m X v 0 1 H v 0 n p u 0 e b O u D g c d 7 M 8 z M i 1 L B j Q 2 C b 6 + 0 t r 6 x u V X e 9 n d 2 9 / Y P K o d H L Z N k m r I m T U S i 2 x E x T H D F m p Z b w d q p Z k R G g j 1 F o 9 u p / / T M t O G J e r T j l I W S D B S P O S X W S Q / Y 9 3 u V a l A L Z k C r B B e k C g U a v c p P t 5 / Q T D J l q S D G d H C Q 2 j A n 2 n I q 2 M T v Z o a l h I 7 I g H U c V U Q y E + a z U y f o z C l 9 F C f a l b J o p v 6 d y I k 0 Z i w j 1 y m J H Z p l b y r + 5 3 U y G 1 + H O V d p Z p m i 8 0 V x J p B N 0 P R v 1 O e a U S v G j h C q u b s V 0 S H R h F q X z s K W S E 5 c J n g 5 g V X S u q j h o I b v L 6 v 1 m y K d M p z A K Z w D h i u o w x 0 0 o A k U B v A C r / D m 5 d 6 7 9 + F 9 z l t L X j F z D A v w v n 4 B h V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4", |
| "sec_num": null |
| }, |
| { |
| "text": "S T c w = = < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" T L h i a f y z + 7 k V W 7 h d y Q 9 4 i j 1 7 7 M 0 = \" > A", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4", |
| "sec_num": null |
| }, |
| { |
| "text": "A A B + X i c b V B N S w M x E J 2 t X 3 X 9 q n r 0 E i y C p 7 I R Q Y 9 F L x 4 r 2 l p o l 5 J N s 2 1 o k l 2 S r F C W / g S v e v Y m X v 0 1 H v 0 n p u 0 e b O u D g c d 7 M 8 z M i 1 L B j Q 2 C b 6 + 0 t r 6 x u V X e 9 n d 2 9 / Y P K o d H L Z N k m r I m T U S i 2 x E x T H D F m p Z b w d q p Z k R G g j 1 F o 9 u p / / T M t O G J e r T j l I W S D B S P O S X W S Q / Y 9 3 u V a l A L Z k C r B B e k C g U a v c p P t 5 / Q T D J l q S D G d H C Q 2 j A n 2 n I q 2 M T v Z o a l h I 7 I g H U c V U Q y E + a z U y f o z C l 9 F C f a l b J o p v 6 d y I k 0 Z i w j 1 y m J H Z p l b y r + 5 3 U y G 1 + H O V d p Z p m i 8 0 V x J p B N 0 P R v 1 O e a U S v G j h C q u b s V 0 S H R h F q X z s K W S E 5 c J n g 5 g V X S u q j h o I b v L 6 v 1 m y K d M p z A K Z w D h i u o w x 0 0 o A k U B v A C r / D m 5 d 6 7 9 + F 9 z l t L X j F z D A v w v n 4 B h V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4", |
| "sec_num": null |
| }, |
| { |
| "text": "S T c w = = < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" T L h i a f y z + 7 k V W 7 h d y Q 9 4 i j 1 7 7 M 0 = \" > A", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4", |
| "sec_num": null |
| }, |
| { |
| "text": "A A B + X i c b V B N S w M x E J 2 t X 3 X 9 q n r 0 E i y C p 7 I R Q Y 9 F L x 4 r 2 l p o l 5 J N s 2 1 o k l 2 S r F C W / g S v e v Y m X v 0 1 H v 0 n p u 0 e b O u D g c d 7 M 8 z M i 1 L B j Q 2 C b 6 + 0 t r 6 x u V X e 9 n d 2 9 / Y P K o d H L Z N k m r I m T U S i 2 x E x T H D F m p Z b w d q p Z k R G g j 1 F o 9 u p / / T M t O G J e r T j l I W S D B S P O S X W S Q / Y 9 3 u V a l A L Z k C r B B e k C g U a v c p P t 5 / Q T D J l q S D G d H C Q 2 j A n 2 n I q 2 M T v Z o a l h I 7 I g H U c V U Q y E + a z U y f o z C l 9 F C f a l b J o p v 6 d y I k 0 Z i w j 1 y m J H Z p l b y r + 5 3 U y G 1 + H O V d p Z p m i 8 0 V x J p B N 0 P R v 1 O e a U S v G j h C q u b s V 0 S H R h F q X z s K W S E 5 c J n g 5 g V X S u q j h o I b v L 6 v 1 m y K d M p z A K Z w D h i u o w x 0 0 o A k U B v A C r / D m 5 d 6 7 9 + F 9 z l t L X j F z D A v w v n 4 B h V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4", |
| "sec_num": null |
| }, |
| { |
| "text": "S T c w = = < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" T L h i a f y z + 7 k V W 7 h d y Q 9 4 i j 1 7 7 M 0 = \" > A", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4", |
| "sec_num": null |
| }, |
| { |
| "text": "A A B + X i c b V B N S w M x E J 2 t X 3 X 9 q n r 0 E i y C p 7 I R Q Y 9 F L x 4 r 2 l p o l 5 J N s 2 1 o k l 2 S r F C W / g S v e v Y m X v 0 1 H v 0 n p u 0 e b O u D g c d 7 M 8 z M i 1 L B j Q 2 C b 6 + 0 t r 6 x u V X e 9 n d 2 9 / Y P K o d H L Z N k m r I m T U S i 2 x E x T H D F m p Z b w d q p Z k R G g j 1 F o 9 u p / / T M t O G J e r T j l I W S D B S P O S X W S Q / Y 9 3 u V a l A L Z k C r B B e k C g U a v c p P t 5 / Q T D J l q S D G d H C Q 2 j A n 2 n I q 2 M T v Z o a l h I 7 I g H U c V U Q y E + a z U y f o z C l 9 F C f a l b J o p v 6 d y I k 0 Z i w j 1 y m J H Z p l b y r + 5 3 U y G 1 + H O V d p Z p m i 8 0 V x J p B N 0 P R v 1 O e a U S v G j h C q u b s V 0 S H R h F q X z s K W S E 5 c J n g 5 g V X S u q j h o I b v L 6 v 1 m y K d M p z A K Z w D h i u o w x 0 0 o A k U B v A C r / D m 5 d 6 7 9 + F 9 z l t L X j F z D A v w v n 4 B h V S T c w = = < / l a t e x i t > 0", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4", |
| "sec_num": null |
| }, |
| { |
| "text": "< l a t e x i t s h a 1 _ b a s e 6 4 = \" J x F U R W U H 6 R + v J i r w M s 9 s g 7 W r H j U = \" > A A A B + H i c b", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4", |
| "sec_num": null |
| }, |
| { |
| "text": "V B N S w M x E J 2 t X 7 V + V T 1 6 C R b B U 8 m K o M e i F 4 9 V 7 A e 0 S 8 m m 2 T Y 0 y S 5 J V q h L / 4 F X P X s T r / 4 b j / 4 T 0 3 Y P t v X B w O O 9 G W b m h Y n g x m L 8 7 R X W 1 j c 2 t 4 r b p Z 3 d v f 2 D 8 u F R 0 8 S p p q x B Y x H r d k g M E 1 y x h u V W s H a i G Z G h Y K 1 w d D v 1 W 0 9 M G x 6 r R z t O W C D J Q P G I U 2 K d 9 I B L v X I F V / E M a J X 4 O a l A j n q v / N P t x z S V T F k q i D E d H y c 2 y I i 2 n A o 2 K X V T w x J C R 2 T A O o 4 q I p k J s t m l E 3 T m l D 6 K Y u 1 K W T R T / 0 5 k R B o z l q H r l M Q O z b I 3 F f / z O q m N r o O M q y S 1 T N H 5 o i g V y M Z o + j b q c 8 2 o F W N H C N X c 3 Y r o k G h C r Q t n Y U s o J y 4 T f z m B V d K 8 q P q 4 6 t 9 f V m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4 A V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4", |
| "sec_num": null |
| }, |
| { |
| "text": "e 4 c 1 7 9 t 6 9 D + 9 z 3 l r w 8 p l j W I D 3 9 Q t O 0 p N e < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" J x F U R W U H 6 R + v J i r w M s 9 s g 7 W r H j U = \" > A A A B + H i c b", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4", |
| "sec_num": null |
| }, |
| { |
| "text": "V B N S w M x E J 2 t X 7 V + V T 1 6 C R b B U 8 m K o M e i F 4 9 V 7 A e 0 S 8 m m 2 T Y 0 y S 5 J V q h L / 4 F X P X s T r / 4 b j / 4 T 0 3 Y P t v X B w O O 9 G W b m h Y n g x m L 8 7 R X W 1 j c 2 t 4 r b p Z 3 d v f 2 D 8 u F R 0 8 S p p q x B Y x H r d k g M E 1 y x h u V W s H a i G Z G h Y K 1 w d D v 1 W 0 9 M G x 6 r R z t O W C D J Q P G I U 2 K d 9 I B L v X I F V / E M a J X 4 O a l A j n q v / N P t x z S V T F k q i D E d H y c 2 y I i 2 n A o 2 K X V T w x J C R 2 T A O o 4 q I p k J s t m l E 3 T m l D 6 K Y u 1 K W T R T / 0 5 k R B o z l q H r l M Q O z b I 3 F f / z O q m N r o O M q y S 1 T N H 5 o i g V y M Z o + j b q c 8 2 o F W N H C N X c 3 Y r o k G h C r Q t n Y U s o J y 4 T f z m B V d K 8 q P q 4 6 t 9 f V m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4 A V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4", |
| "sec_num": null |
| }, |
| { |
| "text": "e 4 c 1 7 9 t 6 9 D + 9 z 3 l r w 8 p l j W I D 3 9 Q t O 0 p N e < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" J x F U R W U H 6 R + v J i r w M s 9 s g 7 W r H j U = \" > A A A B + H i c b", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4", |
| "sec_num": null |
| }, |
| { |
| "text": "V B N S w M x E J 2 t X 7 V + V T 1 6 C R b B U 8 m K o M e i F 4 9 V 7 A e 0 S 8 m m 2 T Y 0 y S 5 J V q h L / 4 F X P X s T r / 4 b j / 4 T 0 3 Y P t v X B w O O 9 G W b m h Y n g x m L 8 7 R X W 1 j c 2 t 4 r b p Z 3 d v f 2 D 8 u F R 0 8 S p p q x B Y x H r d k g M E 1 y x h u V W s H a i G Z G h Y K 1 w d D v 1 W 0 9 M G x 6 r R z t O W C D J Q P G I U 2 K d 9 I B L v X I F V / E M a J X 4 O a l A j n q v / N P t x z S V T F k q i D E d H y c 2 y I i 2 n A o 2 K X V T w x J C R 2 T A O o 4 q I p k J s t m l E 3 T m l D 6 K Y u 1 K W T R T / 0 5 k R B o z l q H r l M Q O z b I 3 F f / z O q m N r o O M q y S 1 T N H 5 o i g V y M Z o + j b q c 8 2 o F W N H C N X c 3 Y r o k G h C r Q t n Y U s o J y 4 T f z m B V d K 8 q P q 4 6 t 9 f V m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4 A V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4", |
| "sec_num": null |
| }, |
| { |
| "text": "e 4 c 1 7 9 t 6 9 D + 9 z 3 l r w 8 p l j W I D 3 9 Q t O 0 p N e < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" J x F U R W U H 6 R + v J i r w M s 9 s g 7 W r H j U = \" > A A A B + H i c b", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4", |
| "sec_num": null |
| }, |
| { |
| "text": "V B N S w M x E J 2 t X 7 V + V T 1 6 C R b B U 8 m K o M e i F 4 9 V 7 A e 0 S 8 m m 2 T Y 0 y S 5 J V q h L / 4 F X P X s T r / 4 b j / 4 T 0 3 Y P t v X B w O O 9 G W b m h Y n g x m L 8 7 R X W 1 j c 2 t 4 r b p Z 3 d v f 2 D 8 u F R 0 8 S p p q x B Y x H r d k g M E 1 y x h u V W s H a i G Z G h Y K 1 w d D v 1 W 0 9 M G x 6 r R z t O W C D J Q P G I U 2 K d 9 I B L v X I F V / E M a J X 4 O a l A j n q v / N P t x z S V T F k q i D E d H y c 2 y I i 2 n A o 2 K X V T w x J C R 2 T A O o 4 q I p k J s t m l E 3 T m l D 6 K Y u 1 K W T R T / 0 5 k R B o z l q H r l M Q O z b I 3 F f / z O q m N r o O M q y S 1 T N H 5 o i g V y M Z o + j b q c 8 2 o F W N H C N X c 3 Y r o k G h C r Q t n Y U s o J y 4 T f z m B V d K 8 q P q 4 6 t 9 f V m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4 A V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4", |
| "sec_num": null |
| }, |
| { |
| "text": "e 4 c 1 7 9 t 6 9 D + 9 z 3 l r w 8 p l j W I D 3 9 Q t O 0 p N e < / l a t e x i t > 1 < l a t e x i t s h a 1 _ b a s e 6 4 = \" T L h i a f y z + 7 k V W 7 h d y Q 9 4 i j 1 7 7 M 0 = \" > A", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4", |
| "sec_num": null |
| }, |
| { |
| "text": "A A B + X i c b V B N S w M x E J 2 t X 3 X 9 q n r 0 E i y C p 7 I R Q Y 9 F L x 4 r 2 l p o l 5 J N s 2 1 o k l 2 S r F C W / g S v e v Y m X v 0 1 H v 0 n p u 0 e b O u D g c d 7 M 8 z M i 1 L B j Q 2 C b 6 + 0 t r 6 x u V X e 9 n d 2 9 / Y P K o d H L Z N k m r I m T U S i 2 x E x T H D F m p Z b w d q p Z k R G g j 1 F o 9 u p / / T M t O G J e r T j l I W S D B S P O S X W S Q / Y 9 3 u V a l A L Z k C r B B e k C g U a v c p P t 5 / Q T D J l q S D G d H C Q 2 j A n 2 n I q 2 M T v Z o a l h I 7 I g H U c V U Q y E + a z U y f o z C l 9 F C f a l b J o p v 6 d y I k 0 Z i w j 1 y m J H Z p l b y r + 5 3 U y G 1 + H O V d p Z p m i 8 0 V x J p B N 0 P R v 1 O e a U S v G j h C q u b s V 0 S H R h F q X z s K W S E 5 c J n g 5 g V X S u q j h o I b v L 6 v 1 m y K d M p z A K Z w D h i u o w x 0 0 o A k U B v A C r / D m 5 d 6 7 9 + F 9 z l t L X j F z D A v w v n 4 B h V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4", |
| "sec_num": null |
| }, |
| { |
| "text": "S T c w = = < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" T L h i a f y z + 7 k V W 7 h d y Q 9 4 i j 1 7 7 M 0 = \" > A", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4", |
| "sec_num": null |
| }, |
| { |
| "text": "A A B + X i c b V B N S w M x E J 2 t X 3 X 9 q n r 0 E i y C p 7 I R Q Y 9 F L x 4 r 2 l p o l 5 J N s 2 1 o k l 2 S r F C W / g S v e v Y m X v 0 1 H v 0 n p u 0 e b O u D g c d 7 M 8 z M i 1 L B j Q 2 C b 6 + 0 t r 6 x u V X e 9 n d 2 9 / Y P K o d H L Z N k m r I m T U S i 2 x E x T H D F m p Z b w d q p Z k R G g j 1 F o 9 u p / / T M t O G J e r T j l I W S D B S P O S X W S Q / Y 9 3 u V a l A L Z k C r B B e k C g U a v c p P t 5 / Q T D J l q S D G d H C Q 2 j A n 2 n I q 2 M T v Z o a l h I 7 I g H U c V U Q y E + a z U y f o z C l 9 F C f a l b J o p v 6 d y I k 0 Z i w j 1 y m J H Z p l b y r + 5 3 U y G 1 + H O V d p Z p m i 8 0 V x J p B N 0 P R v 1 O e a U S v G j h C q u b s V 0 S H R h F q X z s K W S E 5 c J n g 5 g V X S u q j h o I b v L 6 v 1 m y K d M p z A K Z w D h i u o w x 0 0 o A k U B v A C r / D m 5 d 6 7 9 + F 9 z l t L X j F z D A v w v n 4 B h V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4", |
| "sec_num": null |
| }, |
| { |
| "text": "S T c w = = < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" T L h i a f y z + 7 k V W 7 h d y Q 9 4 i j 1 7 7 M 0 = \" > A", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4", |
| "sec_num": null |
| }, |
| { |
| "text": "A A B + X i c b V B N S w M x E J 2 t X 3 X 9 q n r 0 E i y C p 7 I R Q Y 9 F L x 4 r 2 l p o l 5 J N s 2 1 o k l 2 S r F C W / g S v e v Y m X v 0 1 H v 0 n p u 0 e b O u D g c d 7 M 8 z M i 1 L B j Q 2 C b 6 + 0 t r 6 x u V X e 9 n d 2 9 / Y P K o d H L Z N k m r I m T U S i 2 x E x T H D F m p Z b w d q p Z k R G g j 1 F o 9 u p / / T M t O G J e r T j l I W S D B S P O S X W S Q / Y 9 3 u V a l A L Z k C r B B e k C g U a v c p P t 5 / Q T D J l q S D G d H C Q 2 j A n 2 n I q 2 M T v Z o a l h I 7 I g H U c V U Q y E + a z U y f o z C l 9 F C f a l b J o p v 6 d y I k 0 Z i w j 1 y m J H Z p l b y r + 5 3 U y G 1 + H O V d p Z p m i 8 0 V x J p B N 0 P R v 1 O e a U S v G j h C q u b s V 0 S H R h F q X z s K W S E 5 c J n g 5 g V X S u q j h o I b v L 6 v 1 m y K d M p z A K Z w D h i u o w x 0 0 o A k U B v A C r / D m 5 d 6 7 9 + F 9 z l t L X j F z D A v w v n 4 B h V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4", |
| "sec_num": null |
| }, |
| { |
| "text": "S T c w = = < / l a t e x i t > < l a t e x i t s h a 1 _ b a s e 6 4 = \" T L h i a f y z + 7 k V W 7 h d y Q 9 4 i j 1 7 7 M 0 = \" > A", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4", |
| "sec_num": null |
| }, |
| { |
| "text": "A A B + X i c b V B N S w M x E J 2 t X 3 X 9 q n r 0 E i y C p 7 I R Q Y 9 F L x 4 r 2 l p o l 5 J N s 2 1 o k l 2 S r F C W / g S v e v Y m X v 0 1 H v 0 n p u 0 e b O u D g c d 7 M 8 z M i 1 L B j Q 2 C b 6 + 0 t r 6 x u V X e 9 n d 2 9 / Y P K o d H L Z N k m r I m T U S i 2 x E x T H D F m p Z b w d q p Z k R G g j 1 F o 9 u p / / T M t O G J", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4", |
| "sec_num": null |
| }, |
| { |
| "text": "e r T j l I W S D B S P O S X W S Q / Y 9 3 u V a l A L Z k C r B B e k C g U a v c p P t 5 / Q T D J l q S D G d H C Q 2 j A n 2 n I q 2 M T v Z o a l h I 7 I g H U c V U Q y E + a z U y f o z C l 9 F C f a l b J o p v 6 d y I k 0 Z i w j 1 y m J H Z p l b y r + 5 3 U y G 1 + H O a matrix H = (h 1 , h 2 , . . . , h N ), has the singular value decomposition (SVD):", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4", |
| "sec_num": null |
| }, |
| { |
| "text": "V d p Z p m i 8 0 V x J p B N 0 P R v 1 O e a U S v G j h C q u b s V 0 S H R h F q X z s K W S E 5 c J n g 5 g V X S u q j h o I b v L 6 v 1 m y K d M p z A K Z w D h i u o w x 0 0 o A k U B v A C r / D m 5 d 6 7 9 + F 9 z l t L X j F z D A v w v n 4 B h V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4", |
| "sec_num": null |
| }, |
| { |
| "text": "H = U \u039bV T ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4", |
| "sec_num": null |
| }, |
| { |
| "text": "where \u039b is an L \u00d7 N matrix with descending singular values of X on its diagonal, with U and V orthogonal matrices. Then the correlation matrix can be written as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4", |
| "sec_num": null |
| }, |
| { |
| "text": "HH T = U \u039b 2 U T . Assume that the diagonal matrix \u039b 2 = diag(\u03bb 1 , \u03bb 2 , . . . , \u03bb L ) has descending elements \u03bb 1 \u2265 \u03bb 2 \u2265 \u2022 \u2022 \u2022 \u2265 \u03bb L \u2265 0.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4", |
| "sec_num": null |
| }, |
| { |
| "text": "We select first D rows of U as our projection matrix W = U 1:D , then the correlation matrix of W H is W HH T W T = diag(\u03bb 1 , \u03bb 2 , . . . , \u03bb D ), which indicates that the embeddings are projected to D independent and most distinctive axes. After projecting continuous embeddings to a representative lower dimensional space, we apply the hard threshold function at the position 0 to obtain the binary representations (since the embeddings are zero-centered).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "m o 3 e T p F O I F T O A c f r q A G d 1 C H B l C I 4", |
| "sec_num": null |
| }, |
| { |
| "text": "The methods proposed above suffer from the common issue that the model objective does not explicitly encourage the learned binary codes to retain the semantic information of the original continuous embeddings, and a separate binarization step is employed after training. To address this shortcoming, we further consider an autoencoder architecture, that leverages the reconstruction loss to hopefully endow the learned binary representations with more information. Specifically, an encoder network is utilized to transform the continuous into a binary latent vector, which is then reconstructed back with a decoder network.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Autoencoder Architecture", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "For the encoder network, we use a matrix operation, followed by a binarization step, to extract useful features (similar to the random projection setup). Thus, for i = 1, 2, . . . , D, we have:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Autoencoder Architecture", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "b (i) = 1 \u03c3(W i \u2022h+k (i) )>s (i) = sign(\u03c3(W i \u2022 h + k (i) ) \u2212 s (i) ) + 1 2 ,", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Autoencoder Architecture", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "where k is the bias term and k (i) corresponds to the i-th element of k. s (i) denotes the threshold determining whether the i-th bit is 0 or 1. During training, we may use either deterministic or stochastic binarization upon the latent variable. For the deterministic case, s (i) = 0.5 for all dimensions; in the stochastic case, s (i) is uniformly sampled as: s (i) \u223c Uniform(0, 1). We conduct an empirical comparison between these two binarization strategies in Section 4. Prior work have shown that linear decoders are favorable for learning binary codes under the encoder-decoder framework (Carreira-Perpin\u00e1n and Raziperchikolaei, 2015; Dai et al., 2017; Shen et al., 2018) . Inspired by these results, we employ a linear transformation to reconstruct the original continuous embeddings from the binary codes:", |
| "cite_spans": [ |
| { |
| "start": 75, |
| "end": 78, |
| "text": "(i)", |
| "ref_id": null |
| }, |
| { |
| "start": 595, |
| "end": 641, |
| "text": "(Carreira-Perpin\u00e1n and Raziperchikolaei, 2015;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 642, |
| "end": 659, |
| "text": "Dai et al., 2017;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 660, |
| "end": 678, |
| "text": "Shen et al., 2018)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Autoencoder Architecture", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "h (i) = W i \u2022 b + k (i) ,", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "Autoencoder Architecture", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "where W and k are weight and bias term respectively, which are learned. The mean square error between h and\u0125 is employed as the reconstruction loss:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Autoencoder Architecture", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "L rec = 1 D D i=1 (h (i) \u2212\u0125 (i) ) 2 ,", |
| "eq_num": "(5)" |
| } |
| ], |
| "section": "Autoencoder Architecture", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "This objective imposes the binary vector b to encode more information from h (leading to smaller reconstruction error). Straight-through (ST) estimator (Hinton, 2012) is utilized to estimate the gradients for the binary variable. The autoencoder model is optimized by minimizing the reconstruction loss for all sentences. After training, the encoder network is leveraged as the transformation to convert the pre-trained continuous embeddings into the binary form.", |
| "cite_spans": [ |
| { |
| "start": 152, |
| "end": 166, |
| "text": "(Hinton, 2012)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Autoencoder Architecture", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "Although the reconstruction objective can help the binary variable to endow with richer semantics, there is no loss that explicitly encourages the binary vectors to preserve the similarity information contained in the original continuous embeddings. Consequently, the model may lead to small reconstruction error but yield sub-optimal binary representations (Tissier et al., 2019) . To improve the semantic-preserving property of the inferred binary embeddings, we introduce an additional objective term.", |
| "cite_spans": [ |
| { |
| "start": 358, |
| "end": 380, |
| "text": "(Tissier et al., 2019)", |
| "ref_id": "BIBREF30" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Semantic-preserving Regularizer", |
| "sec_num": "3.4.1" |
| }, |
| { |
| "text": "Consider a triple group of sentences (x \u03b1 , x \u03b2 , x \u03b3 ), whose continuous embeddings are (h \u03b1 , h \u03b2 , h \u03b3 ), respectively. Suppose that the cosine similarity between h \u03b1 and h \u03b2 is larger than that between h \u03b2 and h \u03b3 , then it is desirable that the Hamming distance between b \u03b1 and b \u03b2 should be smaller than that between b \u03b2 and b \u03b3 (notably, both large cosine similarity and small Hamming distance indicate that two sentences are semantically-similar).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Semantic-preserving Regularizer", |
| "sec_num": "3.4.1" |
| }, |
| { |
| "text": "Let ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Semantic-preserving Regularizer", |
| "sec_num": "3.4.1" |
| }, |
| { |
| "text": "= 1 if d c (h \u03b1 , h \u03b2 ) \u2265 d c (h \u03b2 , h \u03b3 )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Semantic-preserving Regularizer", |
| "sec_num": "3.4.1" |
| }, |
| { |
| "text": ", and l \u03b1,\u03b2,\u03b3 = \u22121 otherwise. The semantic-preserving regularizer is then defined as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Semantic-preserving Regularizer", |
| "sec_num": "3.4.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "L sp = \u03b1,\u03b2,\u03b3 max{0, l \u03b1,\u03b2,\u03b3 [d h (b \u03b1 , b \u03b2 ) \u2212 d h (b \u03b2 , b \u03b3 )]},", |
| "eq_num": "(6)" |
| } |
| ], |
| "section": "Semantic-preserving Regularizer", |
| "sec_num": "3.4.1" |
| }, |
| { |
| "text": "By penalizing L sp , the learned transformation function g is explicitly encouraged to retain the semantic similarity information of the original continuous embeddings. Thus, the entire objective function to be optimized is:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Semantic-preserving Regularizer", |
| "sec_num": "3.4.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "L = L rec + \u03bb sp L sp ,", |
| "eq_num": "(7)" |
| } |
| ], |
| "section": "Semantic-preserving Regularizer", |
| "sec_num": "3.4.1" |
| }, |
| { |
| "text": "where \u03bb sp controls the relative weight between the reconstruction loss (L rec ) and semantic-preserving loss (L sp ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Semantic-preserving Regularizer", |
| "sec_num": "3.4.1" |
| }, |
| { |
| "text": "Another possible strategy is to directly train the general-purpose binary embeddings from scratch, i.e., jointly optimizing the continuous embeddings training objective and continuous-to-binary parameterization. However, our initial attempts demonstrate that this strategy leads to inferior empirical results. This observation is consistent with the results reported in (Kiros and Chan, 2018) . Specifically, a binarization layer is directly appended over the InferSent architecture (Conneau et al., 2017) during training, which gives rise to much larger drop in terms of the embeddings' quality (we have conducted empirical comparisons with (Kiros and Chan, 2018) in Table 1 ). Therefore, here we focus on learning universal binary embeddings based on pretained continuous sentence representations.", |
| "cite_spans": [ |
| { |
| "start": 370, |
| "end": 392, |
| "text": "(Kiros and Chan, 2018)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 483, |
| "end": 505, |
| "text": "(Conneau et al., 2017)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 642, |
| "end": 664, |
| "text": "(Kiros and Chan, 2018)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 668, |
| "end": 675, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "4 Experimental setup", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "Our proposed model aims to produce highly informative binary sentence embeddings based upon pre-trained continuous representations. In this paper, we utilize InferSent (Conneau et al., 2017) as the continuous embeddings (given its effectiveness and widespread use). Note that all four proposed strategies can be easily extended to other pre-trained general-purpose sentence embeddings as well. Specifically, a bidirectional LSTM architecture along with a max-pooling operation over the hidden units is employed as the sentence encoder, and the model parameters are optimized on the natural language inference tasks, i.e., Standford Natural Language Inference (SNLI) (Bowman et al., 2015) and Multi-Genre Natural Language Inference (MultiNLI) datasets (Williams et al., 2017) .", |
| "cite_spans": [ |
| { |
| "start": 168, |
| "end": 190, |
| "text": "(Conneau et al., 2017)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 751, |
| "end": 774, |
| "text": "(Williams et al., 2017)", |
| "ref_id": "BIBREF36" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pre-trained Continuous Embeddings", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Our model is trained using Adam (Kingma and Ba, 2014), with a value 1 \u00d7 10 \u22125 as the learning rate for all the parameters. The number of bits (i.e., dimension) of the binary representation is set as 512, 1024, 2048 or 4096, and the best choice for each model is chosen on the validation set, and the corresponding test results are presented in Table 1. The batch size is chosen as 64 for all model variants. The hyperparameter over \u03bb sp is selected from {0.2, 0.5, 0.8, 1} on the validation set, and 0.8 is found to deliver the best empirical results. The training with the autoencoder setup takes only about 1 hour to converge, and thus can be readily applicable to even larger datasets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training Details", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "To facilitate comparisons with other baseline methods, we use SentEval toolkit 1 (Conneau and Kiela, 2018) to evaluate the learned binary (compact) sentence embeddings. Concretely, the learned representations are tested on a series of downstream tasks to assess their transferability (with the encoder weights fixed), which can be categorized as follows:", |
| "cite_spans": [ |
| { |
| "start": 81, |
| "end": 106, |
| "text": "(Conneau and Kiela, 2018)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "\u2022 Sentence classification, including sentiment analysis (MR, SST), product reviews (CR), subjectivity classification (SUBJ), opinion polarity detection (MPQA) and question type classification (TREC). A linear classifier is trained with the generic sentence embeddings as the input features. The default SentEval settings is used for all the datasets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "\u2022 Sentence matching, which comprises semantic relatedness (SICK-R, STS14, STSB) and paraphrase detection (MRPC). Particularly, each pair of sentences in STS14 dataset is associated with a similarity score from 0 to 5 (as the corresponding label). Hamming distance between the binary representations is directly leveraged as the prediction score (without any classifier parameters).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "1 https://github.com/facebookresearch/SentEval For the sentence matching benchmarks, to allow fair comparison with the continuous embeddings, we do not use the same classifier architecture in SentEval. Instead, we obtain the predicted relatedness by directly computing the cosine similarity between the continuous embeddings. Consequently, there are no classifier parameters for both the binary and continuous representations. The same valuation metrics in SentEval (Conneau and Kiela, 2018) are utilized for all the tasks. For MRPC, the predictions are made by simply judging whether a sentence pair's score is larger or smaller than the averaged Hamming distance (or cosine similarity).", |
| "cite_spans": [ |
| { |
| "start": 466, |
| "end": 491, |
| "text": "(Conneau and Kiela, 2018)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "We consider several strong baselines to compare with the proposed methods, which include both continuous (dense) and binary (compact) representations. For the continuous generic sentence embeddings, we make comparisons with fastText-BoV (Joulin et al., 2016) , Skip-Thought Vectors (Kiros et al., 2015) and InferSent (Conneau et al., 2017) . As to the binary embeddings, we consider the binarized version of InferLite (Kiros and Chan, 2018) , which, as far as we are concerned, is the only general-purpose binary representations baseline reported.", |
| "cite_spans": [ |
| { |
| "start": 237, |
| "end": 258, |
| "text": "(Joulin et al., 2016)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 282, |
| "end": 302, |
| "text": "(Kiros et al., 2015)", |
| "ref_id": null |
| }, |
| { |
| "start": 317, |
| "end": 339, |
| "text": "(Conneau et al., 2017)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 418, |
| "end": 440, |
| "text": "(Kiros and Chan, 2018)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "We experimented with five model variants to learn general-purpose binary embeddings: HTbinary (hard threshold, which is selected from {0, 0.01, 0.1} on the validation set), Rand-binary (random projection), PCA-binary (reduce the dimensionality with principal component analysis), AE-binary (autoencoder with the reconstruction objective) and AE-binary-SP (autoencoder with both the reconstruction objective and Semantic-Preserving loss). Our code will be released to encourage future research.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Results", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We evalaute the binary sentence representations produced by different methods with a set of transferring tasks. The results are shown in Table 1 . The proposed autoencoder architecture generally demonstrates the best results. Especially while combined with the semantic-preserving loss defined in (7), AE-binary-SP exhibits higher performance compared with a standard autoencoder. It is worth noting that the Rand-binary and PCAbinary model variants also show competitive performance despite their simplicity. These strategies are also quite promising given that no training is required given the pre-trained continuous sentence representations.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 137, |
| "end": 144, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Task transfer evaluation", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Another important result is that, the AE-binary-SP achieves competitive results relative to the In-ferSent, leading to only about 2% loss on most datasets and even performing at par with InferSent on several datasets, such as the MPQA and STS14 datasets. On the sentence matching tasks, the yielded binary codes are evaluated by merely utilizing the hamming distance features (as mentioned above). To allow fair comparison, we compare the predicted scores with the cosine similarity scores based upon the continuous representations (there are no additional parameters for the classifier). The binary codes brings out promising empirical results relative to their continuous counterparts, and even slightly outperform InferSent on the STS14 dataset.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task transfer evaluation", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "We also found that our AE-binary-SP model variant consistently demonstrate superior results than the InferLite baselines, which optimize the NLI objective directly over the binary representations. This may be attributed to the difficulty of backpropagating gradients through discrete/binary variables, and would be an interesting direction for future research.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task transfer evaluation", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Case Study One major advantage of binary sentence representations is that the similarity of two sentences can be evaluated by merely calculating the hamming distance between their binary codes. To gain more intuition regarding the semantic information encoded in the binary embeddings, we convert all the sentences in the SNLI dataset into continuous and binary vectors (with InferSent-G and AE-binary-SP, respectively). The top-3 closet sentences are retrieved based upon the corresponding metrics, and the resulting samples are shown in Table 2 . It can be observed that the sentences selected based upon the Hamming distance indeed convey very similar semantic meanings. In some cases, the results with binary codes are even more reasonable compared with the continuous embeddings. For example, for the first query, all three sentences in the left column relate to \"watching a movie\", while one of the sentences in the right column is about \"sleeping\".", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 539, |
| "end": 546, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Nearest Neighbor Retrieval", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Retrieval Speed The bitwise comparison is much faster than the element-wise multiplication operation (between real-valued vectors) (Tissier et al., 2019) . To verify the speed improvement, we sample 10000 sentence pairs from SNLI and extract their continuous and binary embeddings (with the same dimension of 4096), respectively. We record the time to compute the cosine similarity and hamming distance between the corresponding representations. With our Python implementation, it takes 3.67\u00b5s and 288ns respectively, indicating that calculating the Hamming distance is over 12 times faster. Our implementation is not optimized, and the running time of computing Hamming distance can be further improved (to be proportional to the number of different bits, rather than the input length 2 ).", |
| "cite_spans": [ |
| { |
| "start": 131, |
| "end": 153, |
| "text": "(Tissier et al., 2019)", |
| "ref_id": "BIBREF30" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Nearest Neighbor Retrieval", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "To investigate the importance of incorporating the locality-sensitive regularizer, we select different values of \u03bb sp (ranging from 0.0 to 1.0) and explore how the transfer results would change accordingly. The \u03bb sp controls the relative weight of the semantic-preserving loss term. As shown in Table 3 , augmenting the semantic-preserving loss consistently improves the quality of learned binary embeddings, while the best test accuracy on the MR dataset is obtained with \u03bb sp = 0.8.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 295, |
| "end": 302, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "The effect of semantic-preserving loss", |
| "sec_num": "5.3.1" |
| }, |
| { |
| "text": "Hamming Distance (binary embeddings)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The effect of semantic-preserving loss", |
| "sec_num": "5.3.1" |
| }, |
| { |
| "text": "Cosine Similarity (continuous embeddings) Query: Several people are sitting in a movie theater . A group of people watching a movie at a theater .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The effect of semantic-preserving loss", |
| "sec_num": "5.3.1" |
| }, |
| { |
| "text": "A group of people watching a movie at a theater . A crowd of people are watching a movie indoors .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The effect of semantic-preserving loss", |
| "sec_num": "5.3.1" |
| }, |
| { |
| "text": "A man is watching a movie in a theater . A man is watching a movie in a theater .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The effect of semantic-preserving loss", |
| "sec_num": "5.3.1" |
| }, |
| { |
| "text": "Some people are sleeping on a sofa in front of the television .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The effect of semantic-preserving loss", |
| "sec_num": "5.3.1" |
| }, |
| { |
| "text": "Query: A woman crossing a busy downtown street . A lady is walking down a busy street .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The effect of semantic-preserving loss", |
| "sec_num": "5.3.1" |
| }, |
| { |
| "text": "A woman walking on the street downtown . A woman is on a crowded street .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The effect of semantic-preserving loss", |
| "sec_num": "5.3.1" |
| }, |
| { |
| "text": "A lady is walking down a busy street . A woman walking on the street downtown .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The effect of semantic-preserving loss", |
| "sec_num": "5.3.1" |
| }, |
| { |
| "text": "A man and woman walking down a busy street .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The effect of semantic-preserving loss", |
| "sec_num": "5.3.1" |
| }, |
| { |
| "text": "Query: A well dressed man standing in front of piece of artwork . A well dressed man standing in front of an abstract fence painting .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The effect of semantic-preserving loss", |
| "sec_num": "5.3.1" |
| }, |
| { |
| "text": "A man wearing headphones is standing in front of a poster . A man wearing headphones is standing in front of a poster .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The effect of semantic-preserving loss", |
| "sec_num": "5.3.1" |
| }, |
| { |
| "text": "A man standing in front of a chalkboard points at a drawing . A man in a blue shirt standing in front of a garage-like structure painted with geometric designs .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The effect of semantic-preserving loss", |
| "sec_num": "5.3.1" |
| }, |
| { |
| "text": "A man in a blue shirt standing in front of a garage-like structure painted with geometric designs .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The effect of semantic-preserving loss", |
| "sec_num": "5.3.1" |
| }, |
| { |
| "text": "Query: A woman is sitting at a bar eating a hamburger . A woman sitting eating a sandwich .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The effect of semantic-preserving loss", |
| "sec_num": "5.3.1" |
| }, |
| { |
| "text": "A woman is sitting in a cafe eating lunch . A woman is sitting in a cafe eating lunch .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The effect of semantic-preserving loss", |
| "sec_num": "5.3.1" |
| }, |
| { |
| "text": "A woman is eating at a diner . The woman is eating a hotdog in the middle of her bedroom .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The effect of semantic-preserving loss", |
| "sec_num": "5.3.1" |
| }, |
| { |
| "text": "A woman is eating her meal at a resturant .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The effect of semantic-preserving loss", |
| "sec_num": "5.3.1" |
| }, |
| { |
| "text": "Query: Group of men trying to catch fish with a fishing net . Two men are on a boat trying to fish for food during a sunset .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The effect of semantic-preserving loss", |
| "sec_num": "5.3.1" |
| }, |
| { |
| "text": "There are three men on a fishing boat trying to catch bass . There are three men on a fishing boat trying to catch bass .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The effect of semantic-preserving loss", |
| "sec_num": "5.3.1" |
| }, |
| { |
| "text": "Two men are trying to fish . Two men pull a fishing net up into their red boat .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The effect of semantic-preserving loss", |
| "sec_num": "5.3.1" |
| }, |
| { |
| "text": "Two men are on a boat trying to fish for food during a sunset . Table 3 : Ablation study for the AE-binary-SP model with different choices of \u03bb sp (evaluated with test accuracy on the MR dataset).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 64, |
| "end": 71, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "The effect of semantic-preserving loss", |
| "sec_num": "5.3.1" |
| }, |
| { |
| "text": "As discussed in Section 3.4, the binary latent vector b can be obtained with either a deterministic or stochastically sampled threshold. We compare these two sampling strategies on several downstream tasks. As illustrated in Figure 2 , setting a fixed threshold demonstrates better empirical performance on all the datasets. Therefore, deterministic threshold is employed for all the autoencoder model variants in our experiments. extracted binary embeddings to their dimensions, we run four model variants (Rand-binary, PCAbinary, AE-binary, AE-binary-SP) with different number of bits (i.e., 512, 1024, 2048, 4096) , and their corresponding results on the MR dataset are shown in Figure 3 . For the AE-binary and AE-binary-SP models, longer binary codes consistently deliver better results. While for the Rand-binary and PCA-binary variants, the quality of inferred representations is much less sensitive to the embedding dimension. Notably, these two strategies exhibit competitive performance even with only 512 bits. Therefore, in the case where less memory footprint or little training is preferred, Rand-binary and PCA-binary could be more judicious choices.", |
| "cite_spans": [ |
| { |
| "start": 587, |
| "end": 616, |
| "text": "(i.e., 512, 1024, 2048, 4096)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 225, |
| "end": 233, |
| "text": "Figure 2", |
| "ref_id": null |
| }, |
| { |
| "start": 682, |
| "end": 690, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Sampling strategy", |
| "sec_num": "5.3.2" |
| }, |
| { |
| "text": "This paper presents a first step towards learning binary and general-purpose sentence representations that allow for efficient storage and fast retrieval over massive corpora. To this end, we explore four distinct strategies to convert pre-trained continuous sentence embeddings into a binarized form. Notably, a regularized autoencoder augmented with semantic-preserving loss exhibits the best empirical results, degrading performance by only around 2% while saving over 98% memory footprint. Besides, two other model variants with a random projection or PCA transformation require no training and demonstrate competitive embedding quality even with relatively small dimensions. Experiments on nearest-neighbor sentence retrieval further validate the effectiveness of the proposed framework.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "https://en.wikipedia.org/wiki/Hamming_distance", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Fine-grained analysis of sentence embeddings using auxiliary prediction tasks", |
| "authors": [ |
| { |
| "first": "Yossi", |
| "middle": [], |
| "last": "Adi", |
| "suffix": "" |
| }, |
| { |
| "first": "Einat", |
| "middle": [], |
| "last": "Kermany", |
| "suffix": "" |
| }, |
| { |
| "first": "Yonatan", |
| "middle": [], |
| "last": "Belinkov", |
| "suffix": "" |
| }, |
| { |
| "first": "Ofer", |
| "middle": [], |
| "last": "Lavi", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Goldberg", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yossi Adi, Einat Kermany, Yonatan Belinkov, Ofer Lavi, and Yoav Goldberg. 2017. Fine-grained anal- ysis of sentence embeddings using auxiliary predic- tion tasks. CoRR, abs/1608.04207.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "A large annotated corpus for learning natural language inference", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Samuel", |
| "suffix": "" |
| }, |
| { |
| "first": "Gabor", |
| "middle": [], |
| "last": "Bowman", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Angeli", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Potts", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Samuel R. Bowman, Gabor Angeli, Christopher Potts, and Christopher D. Manning. 2015. A large anno- tated corpus for learning natural language inference. In EMNLP.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Hashing with binary autoencoders", |
| "authors": [ |
| { |
| "first": "Ramin", |
| "middle": [], |
| "last": "Miguel A Carreira-Perpin\u00e1n", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Raziperchikolaei", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the IEEE conference on computer vision and pattern recognition", |
| "volume": "", |
| "issue": "", |
| "pages": "557--566", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Miguel A Carreira-Perpin\u00e1n and Ramin Raziperchiko- laei. 2015. Hashing with binary autoencoders. In Proceedings of the IEEE conference on computer vi- sion and pattern recognition, pages 557-566.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Chris Tar, Yun-Hsuan Sung, Brian Strope, and Ray Kurzweil", |
| "authors": [ |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Cer", |
| "suffix": "" |
| }, |
| { |
| "first": "Yinfei", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Nan", |
| "middle": [], |
| "last": "Sheng Yi Kong", |
| "suffix": "" |
| }, |
| { |
| "first": "Nicole", |
| "middle": [], |
| "last": "Hua", |
| "suffix": "" |
| }, |
| { |
| "first": "Rhomni", |
| "middle": [], |
| "last": "Limtiaco", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "St", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [], |
| "last": "John", |
| "suffix": "" |
| }, |
| { |
| "first": "Mario", |
| "middle": [], |
| "last": "Constant", |
| "suffix": "" |
| }, |
| { |
| "first": "Steve", |
| "middle": [], |
| "last": "Guajardo-Cespedes", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Yuan", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Daniel Cer, Yinfei Yang, Sheng yi Kong, Nan Hua, Nicole Limtiaco, Rhomni St. John, Noah Con- stant, Mario Guajardo-Cespedes, Steve Yuan, Chris Tar, Yun-Hsuan Sung, Brian Strope, and Ray Kurzweil. 2018. Universal sentence encoder. CoRR, abs/1803.11175.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Learning k-way d-dimensional discrete codes for compact embedding representations", |
| "authors": [ |
| { |
| "first": "Ting", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Yizhou", |
| "middle": [], |
| "last": "Martin Renqiang Min", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1806.09464" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ting Chen, Martin Renqiang Min, and Yizhou Sun. 2018. Learning k-way d-dimensional discrete codes for compact embedding representations. arXiv preprint arXiv:1806.09464.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Senteval: An evaluation toolkit for universal sentence representations", |
| "authors": [ |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Douwe", |
| "middle": [], |
| "last": "Kiela", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1803.05449" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexis Conneau and Douwe Kiela. 2018. Senteval: An evaluation toolkit for universal sentence representa- tions. arXiv preprint arXiv:1803.05449.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Supervised learning of universal sentence representations from natural language inference data", |
| "authors": [ |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Douwe", |
| "middle": [], |
| "last": "Kiela", |
| "suffix": "" |
| }, |
| { |
| "first": "Holger", |
| "middle": [], |
| "last": "Schwenk", |
| "suffix": "" |
| }, |
| { |
| "first": "Lo\u00efc", |
| "middle": [], |
| "last": "Barrault", |
| "suffix": "" |
| }, |
| { |
| "first": "Antoine", |
| "middle": [], |
| "last": "Bordes", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexis Conneau, Douwe Kiela, Holger Schwenk, Lo\u00efc Barrault, and Antoine Bordes. 2017. Supervised learning of universal sentence representations from natural language inference data. In EMNLP.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Semi-supervised sequence learning", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Andrew", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc V", |
| "middle": [], |
| "last": "Dai", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "3079--3087", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrew M Dai and Quoc V Le. 2015. Semi-supervised sequence learning. In Advances in neural informa- tion processing systems, pages 3079-3087.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Stochastic generative hashing", |
| "authors": [ |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Dai", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruiqi", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| }, |
| { |
| "first": "Sanjiv", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| }, |
| { |
| "first": "Niao", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Le", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 34th International Conference on Machine Learning", |
| "volume": "70", |
| "issue": "", |
| "pages": "913--922", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bo Dai, Ruiqi Guo, Sanjiv Kumar, Niao He, and Le Song. 2017. Stochastic generative hashing. In Proceedings of the 34th International Conference on Machine Learning-Volume 70, pages 913-922. JMLR. org.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1810.04805" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understand- ing. arXiv preprint arXiv:1810.04805.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Learning generic sentence representations using convolutional neural networks", |
| "authors": [ |
| { |
| "first": "Zhe", |
| "middle": [], |
| "last": "Gan", |
| "suffix": "" |
| }, |
| { |
| "first": "Yunchen", |
| "middle": [], |
| "last": "Pu", |
| "suffix": "" |
| }, |
| { |
| "first": "Ricardo", |
| "middle": [], |
| "last": "Henao", |
| "suffix": "" |
| }, |
| { |
| "first": "Chunyuan", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodong", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Lawrence", |
| "middle": [], |
| "last": "Carin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhe Gan, Yunchen Pu, Ricardo Henao, Chunyuan Li, Xiaodong He, and Lawrence Carin. 2017. Learning generic sentence representations using convolutional neural networks. In EMNLP.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Understanding the difficulty of training deep feedforward neural networks", |
| "authors": [ |
| { |
| "first": "Xavier", |
| "middle": [], |
| "last": "Glorot", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the thirteenth international conference on artificial intelligence and statistics", |
| "volume": "", |
| "issue": "", |
| "pages": "249--256", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xavier Glorot and Yoshua Bengio. 2010. Understand- ing the difficulty of training deep feedforward neu- ral networks. In Proceedings of the thirteenth in- ternational conference on artificial intelligence and statistics, pages 249-256.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Learning distributed representations of sentences from unlabelled data", |
| "authors": [ |
| { |
| "first": "Felix", |
| "middle": [], |
| "last": "Hill", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "HLT-NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Felix Hill, Kyunghyun Cho, and Anna Korhonen. 2016. Learning distributed representations of sentences from unlabelled data. In HLT-NAACL.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Neural networks for machine learning. coursera", |
| "authors": [ |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Hinton", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "G Hinton. 2012. Neural networks for machine learn- ing. coursera,[video lectures].", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Categorical reparameterization with gumbel-softmax", |
| "authors": [ |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Jang", |
| "suffix": "" |
| }, |
| { |
| "first": "Shixiang", |
| "middle": [], |
| "last": "Gu", |
| "suffix": "" |
| }, |
| { |
| "first": "Ben", |
| "middle": [], |
| "last": "Poole", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1611.01144" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eric Jang, Shixiang Gu, and Ben Poole. 2016. Categor- ical reparameterization with gumbel-softmax. arXiv preprint arXiv:1611.01144.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Discourse-based objectives for fast unsupervised sentence representation learning", |
| "authors": [ |
| { |
| "first": "Yacine", |
| "middle": [], |
| "last": "Jernite", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel", |
| "middle": [ |
| "R" |
| ], |
| "last": "Bowman", |
| "suffix": "" |
| }, |
| { |
| "first": "David A", |
| "middle": [], |
| "last": "Son", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yacine Jernite, Samuel R. Bowman, and David A Son- tag. 2017. Discourse-based objectives for fast un- supervised sentence representation learning. CoRR, abs/1705.00557.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Bag of tricks for efficient text classification", |
| "authors": [ |
| { |
| "first": "Armand", |
| "middle": [], |
| "last": "Joulin", |
| "suffix": "" |
| }, |
| { |
| "first": "Edouard", |
| "middle": [], |
| "last": "Grave", |
| "suffix": "" |
| }, |
| { |
| "first": "Piotr", |
| "middle": [], |
| "last": "Bojanowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1607.01759" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Armand Joulin, Edouard Grave, Piotr Bojanowski, and Tomas Mikolov. 2016. Bag of tricks for efficient text classification. arXiv preprint arXiv:1607.01759.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Adam: A method for stochastic optimization", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Diederik", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1412.6980" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik P Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Inferlite: Simple universal sentence representations from natural language inference data", |
| "authors": [ |
| { |
| "first": "Jamie", |
| "middle": [], |
| "last": "Kiros", |
| "suffix": "" |
| }, |
| { |
| "first": "William", |
| "middle": [], |
| "last": "Chan", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "4868--4874", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jamie Kiros and William Chan. 2018. Inferlite: Sim- ple universal sentence representations from natural language inference data. In Proceedings of the 2018 Conference on Empirical Methods in Natural Lan- guage Processing, pages 4868-4874.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Raquel Urtasun, and Sanja Fidler. 2015. Skip-thought vectors", |
| "authors": [ |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Kiros", |
| "suffix": "" |
| }, |
| { |
| "first": "Yukun", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruslan", |
| "middle": [], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [ |
| "S" |
| ], |
| "last": "Zemel", |
| "suffix": "" |
| }, |
| { |
| "first": "Antonio", |
| "middle": [], |
| "last": "Torralba", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "NIPS", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ryan Kiros, Yukun Zhu, Ruslan Salakhutdinov, Richard S. Zemel, Antonio Torralba, Raquel Urta- sun, and Sanja Fidler. 2015. Skip-thought vectors. In NIPS.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "An efficient framework for learning sentence representations", |
| "authors": [ |
| { |
| "first": "Lajanugen", |
| "middle": [], |
| "last": "Logeswaran", |
| "suffix": "" |
| }, |
| { |
| "first": "Honglak", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lajanugen Logeswaran and Honglak Lee. 2018. An efficient framework for learning sentence represen- tations. ICLR.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Dissent: Sentence representation learning from explicit discourse relations", |
| "authors": [ |
| { |
| "first": "Allen", |
| "middle": [], |
| "last": "Nie", |
| "suffix": "" |
| }, |
| { |
| "first": "Erin", |
| "middle": [ |
| "D" |
| ], |
| "last": "Bennett", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "D" |
| ], |
| "last": "Goodman", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Allen Nie, Erin D. Bennett, and Noah D. Good- man. 2017. Dissent: Sentence representation learning from explicit discourse relations. CoRR, abs/1710.04334.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Unsupervised learning of sentence embeddings using compositional n-gram features", |
| "authors": [ |
| { |
| "first": "Matteo", |
| "middle": [], |
| "last": "Pagliardini", |
| "suffix": "" |
| }, |
| { |
| "first": "Prakhar", |
| "middle": [], |
| "last": "Gupta", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Jaggi", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "NAACL-HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matteo Pagliardini, Prakhar Gupta, and Martin Jaggi. 2018. Unsupervised learning of sentence embed- dings using compositional n-gram features. In NAACL-HLT.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Improving language understanding by generative pre-training", |
| "authors": [ |
| { |
| "first": "Alec", |
| "middle": [], |
| "last": "Radford", |
| "suffix": "" |
| }, |
| { |
| "first": "Karthik", |
| "middle": [], |
| "last": "Narasimhan", |
| "suffix": "" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Salimans", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alec Radford, Karthik Narasimhan, Tim Salimans, and Ilya Sutskever. 2018. Improving language under- standing by generative pre-training. URL https://s3- us-west-2. amazonaws. com/openai-assets/research- covers/languageunsupervised/language under- standing paper. pdf.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Selfgoverning neural networks for on-device short text classification", |
| "authors": [ |
| { |
| "first": "Sujith", |
| "middle": [], |
| "last": "Ravi", |
| "suffix": "" |
| }, |
| { |
| "first": "Zornitsa", |
| "middle": [], |
| "last": "Kozareva", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "804--810", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sujith Ravi and Zornitsa Kozareva. 2018. Self- governing neural networks for on-device short text classification. In Proceedings of the 2018 Confer- ence on Empirical Methods in Natural Language Processing, pages 804-810.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Universal language model fine-tuning for text classification", |
| "authors": [ |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Ruder", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeremy", |
| "middle": [], |
| "last": "Howard", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sebastian Ruder and Jeremy Howard. 2018. Universal language model fine-tuning for text classification. In ACL.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Semantic hashing", |
| "authors": [ |
| { |
| "first": "Ruslan", |
| "middle": [], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoffrey", |
| "middle": [], |
| "last": "Hinton", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "International Journal of Approximate Reasoning", |
| "volume": "50", |
| "issue": "7", |
| "pages": "969--978", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ruslan Salakhutdinov and Geoffrey Hinton. 2009. Se- mantic hashing. International Journal of Approxi- mate Reasoning, 50(7):969-978.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Nash: Toward end-to-end neural architecture for generative semantic hashing", |
| "authors": [ |
| { |
| "first": "Dinghan", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Qinliang", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "Paidamoyo", |
| "middle": [], |
| "last": "Chapfuwa", |
| "suffix": "" |
| }, |
| { |
| "first": "Wenlin", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Guoyin", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Lawrence", |
| "middle": [], |
| "last": "Carin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ricardo", |
| "middle": [], |
| "last": "Henao", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dinghan Shen, Qinliang Su, Paidamoyo Chapfuwa, Wenlin Wang, Guoyin Wang, Lawrence Carin, and Ricardo Henao. 2018. Nash: Toward end-to-end neural architecture for generative semantic hashing. In ACL.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Compressing word embeddings via deep compositional code learning", |
| "authors": [ |
| { |
| "first": "Raphael", |
| "middle": [], |
| "last": "Shu", |
| "suffix": "" |
| }, |
| { |
| "first": "Hideki", |
| "middle": [], |
| "last": "Nakayama", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1711.01068" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Raphael Shu and Hideki Nakayama. 2017. Compress- ing word embeddings via deep compositional code learning. arXiv preprint arXiv:1711.01068.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Improving sentence representations with multi-view frameworks", |
| "authors": [ |
| { |
| "first": "Shuai", |
| "middle": [], |
| "last": "Tang", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Virginia R De Sa", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1810.01064" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shuai Tang and Virginia R de Sa. 2018. Improving sen- tence representations with multi-view frameworks. arXiv preprint arXiv:1810.01064.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Near-lossless binarization of word embeddings", |
| "authors": [ |
| { |
| "first": "Julien", |
| "middle": [], |
| "last": "Tissier", |
| "suffix": "" |
| }, |
| { |
| "first": "Amaury", |
| "middle": [], |
| "last": "Habrard", |
| "suffix": "" |
| }, |
| { |
| "first": "Christophe", |
| "middle": [], |
| "last": "Gravier", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Julien Tissier, Amaury Habrard, and Christophe Gravier. 2019. Near-lossless binarization of word embeddings. AAAI.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Online generation of locality sensitive hash signatures", |
| "authors": [ |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Van Durme", |
| "suffix": "" |
| }, |
| { |
| "first": "Ashwin", |
| "middle": [], |
| "last": "Lall", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the ACL 2010 Conference Short Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "231--235", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Benjamin Van Durme and Ashwin Lall. 2010. Online generation of locality sensitive hash signatures. In Proceedings of the ACL 2010 Conference Short Pa- pers, pages 231-235. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Hashing for similarity search: A survey", |
| "authors": [ |
| { |
| "first": "Jingdong", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingkuan", |
| "middle": [], |
| "last": "Heng Tao Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianqiu", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ji", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1408.2927" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jingdong Wang, Heng Tao Shen, Jingkuan Song, and Jianqiu Ji. 2014. Hashing for similarity search: A survey. arXiv preprint arXiv:1408.2927.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Towards universal paraphrastic sentence embeddings", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Wieting", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Bansal", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Gimpel", |
| "suffix": "" |
| }, |
| { |
| "first": "Karen", |
| "middle": [], |
| "last": "Livescu", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John Wieting, Mohit Bansal, Kevin Gimpel, and Karen Livescu. 2016. Towards universal paraphrastic sen- tence embeddings. CoRR, abs/1511.08198.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Paranmt-50m: Pushing the limits of paraphrastic sentence embeddings with millions of machine translations", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Wieting", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Gimpel", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John Wieting and Kevin Gimpel. 2018. Paranmt-50m: Pushing the limits of paraphrastic sentence embed- dings with millions of machine translations. In ACL.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "No training required: Exploring random encoders for sentence classification", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Wieting", |
| "suffix": "" |
| }, |
| { |
| "first": "Douwe", |
| "middle": [], |
| "last": "Kiela", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John Wieting and Douwe Kiela. 2018. No training required: Exploring random encoders for sentence classification. CoRR, abs/1901.10444.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "A broad-coverage challenge corpus for sentence understanding through inference", |
| "authors": [ |
| { |
| "first": "Adina", |
| "middle": [], |
| "last": "Williams", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikita", |
| "middle": [], |
| "last": "Nangia", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel R", |
| "middle": [], |
| "last": "Bowman", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1704.05426" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adina Williams, Nikita Nangia, and Samuel R Bow- man. 2017. A broad-coverage challenge corpus for sentence understanding through inference. arXiv preprint arXiv:1704.05426.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Convolutional neural networks for text hashing", |
| "authors": [ |
| { |
| "first": "Jiaming", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Peng", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Guanhua", |
| "middle": [], |
| "last": "Tian", |
| "suffix": "" |
| }, |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jun", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Fangyuan", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Hongwei", |
| "middle": [], |
| "last": "Hao", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Twenty-Fourth International Joint Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiaming Xu, Peng Wang, Guanhua Tian, Bo Xu, Jun Zhao, Fangyuan Wang, and Hongwei Hao. 2015. Convolutional neural networks for text hashing. In Twenty-Fourth International Joint Conference on Artificial Intelligence.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Self-taught hashing for fast similarity search", |
| "authors": [ |
| { |
| "first": "Dell", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jun", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Deng", |
| "middle": [], |
| "last": "Cai", |
| "suffix": "" |
| }, |
| { |
| "first": "Jinsong", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 33rd international ACM SIGIR conference on Research and development in information retrieval", |
| "volume": "", |
| "issue": "", |
| "pages": "18--25", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dell Zhang, Jun Wang, Deng Cai, and Jinsong Lu. 2010. Self-taught hashing for fast similarity search. In Proceedings of the 33rd international ACM SI- GIR conference on Research and development in in- formation retrieval, pages 18-25. ACM.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "Proposed model architectures: (a) direct binarization with a hard threshold s; (b) reducing the dimensionality with either a random projection or PCA, followed by a binarization step; (c) an encoding-decoding framework with an additional semantic-preserving loss.", |
| "num": null, |
| "uris": null, |
| "type_str": "figure" |
| }, |
| "FIGREF1": { |
| "text": "d c (\u2022, \u2022) and d h (\u2022, \u2022) denote the cosine similarity and Hamming distance (in the continuous and binary embedding space), respectively. Define l \u03b1,\u03b2,\u03b3 as an indicator such that, l \u03b1,\u03b2,\u03b3", |
| "num": null, |
| "uris": null, |
| "type_str": "figure" |
| }, |
| "FIGREF2": { |
| "text": "The comparison between deterministic and stochastic sampling for the autoencoder strategy. 5.3.3 The effect of embedding dimension. Except for the hard threshold method, the other three proposed strategies all possess the flexibility of adaptively choosing the dimension of learned binary representations. To explore the sensitivity of this choice: The test accuracy of different models on the MR dataset across 512, 1024, 2048, 4096 bits for the learned binary representations.", |
| "num": null, |
| "uris": null, |
| "type_str": "figure" |
| }, |
| "TABREF1": { |
| "html": null, |
| "content": "<table/>", |
| "num": null, |
| "type_str": "table", |
| "text": "Performance on the test set for 10 downstream tasks. The STS14, STSB and MRPC are evaluated with Pearson and Spearman correlations, and SICK-R is measured with Pearson correlation. All other datasets are evaluated with test accuracy. InferSent-G uses Glove (G) as the word embeddings, while InferSent-FF employs FastText (F) embeddings with Fixed (F) padding. The empirical results of InferLite with different lengths of binary embeddings, i.e., 256, 1024 and 4096, are considered." |
| }, |
| "TABREF2": { |
| "html": null, |
| "content": "<table><tr><td>\u03bbsp</td><td>0.0</td><td>0.2</td><td>0.5</td><td>0.8</td><td>1.0</td></tr><tr><td>Accuracy</td><td colspan=\"5\">78.2 78.5 78.5 79.1 78.4</td></tr></table>", |
| "num": null, |
| "type_str": "table", |
| "text": "Nearest neighbor retrieval results on the SNLI dataset. Given a query sentence, the left column shows the top-3 retrieved samples based upon the Hamming distance with all sentences' binary representations, while the right column exhibits the samples according to the cosine similarity of their continuous embeddings." |
| } |
| } |
| } |
| } |