| { |
| "paper_id": "Q15-1004", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:07:40.630493Z" |
| }, |
| "title": "SPRITE: Generalizing Topic Models with Structured Priors", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [ |
| "J" |
| ], |
| "last": "Paul", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Johns Hopkins University", |
| "location": { |
| "postCode": "21218", |
| "settlement": "Baltimore", |
| "region": "MD" |
| } |
| }, |
| "email": "mpaul@cs.jhu.edu" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Dredze", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Johns Hopkins University", |
| "location": { |
| "postCode": "21218", |
| "settlement": "Baltimore", |
| "region": "MD" |
| } |
| }, |
| "email": "mdredze@cs.jhu.edu" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "We introduce SPRITE, a family of topic models that incorporates structure into model priors as a function of underlying components. The structured priors can be constrained to model topic hierarchies, factorizations, correlations, and supervision, allowing SPRITE to be tailored to particular settings. We demonstrate this flexibility by constructing a SPRITE-based model to jointly infer topic hierarchies and author perspective, which we apply to corpora of political debates and online reviews. We show that the model learns intuitive topics, outperforming several other topic models at predictive tasks.", |
| "pdf_parse": { |
| "paper_id": "Q15-1004", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "We introduce SPRITE, a family of topic models that incorporates structure into model priors as a function of underlying components. The structured priors can be constrained to model topic hierarchies, factorizations, correlations, and supervision, allowing SPRITE to be tailored to particular settings. We demonstrate this flexibility by constructing a SPRITE-based model to jointly infer topic hierarchies and author perspective, which we apply to corpora of political debates and online reviews. We show that the model learns intuitive topics, outperforming several other topic models at predictive tasks.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Topic models can be a powerful aid for analyzing large collections of text by uncovering latent interpretable structures without manual supervision. Yet people often have expectations about topics in a given corpus and how they should be structured for a particular task. It is crucial for the user experience that topics meet these expectations yet black box topic models provide no control over the desired output.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "This paper presents SPRITE, a family of topic models that provide a flexible framework for encoding preferences as priors for how topics should be structured. SPRITE can incorporate many types of structure that have been considered in prior work, including hierarchies (Blei et al., 2003a; Mimno et al., 2007) , factorizations (Paul and Dredze, 2012; Eisenstein et al., 2011) , sparsity Balasubramanyan and Cohen, 2013) , correlations between topics (Blei and Lafferty, 2007; Li and McCallum, 2006) , preferences over word choices (Andrzejewski et al., 2009; , and associations between topics and document attributes (Ramage et al., 2009; Mimno and McCallum, 2008) . SPRITE builds on a standard topic model, adding structure to the priors over the model parameters. The priors are given by log-linear functions of underlying components ( \u00a72), which provide additional latent structure that we will show can enrich the model in many ways. By applying particular constraints and priors to the component hyperparameters, a variety of structures can be induced such as hierarchies and factorizations ( \u00a73), and we will show that this framework captures many existing topic models ( \u00a74).", |
| "cite_spans": [ |
| { |
| "start": 269, |
| "end": 289, |
| "text": "(Blei et al., 2003a;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 290, |
| "end": 309, |
| "text": "Mimno et al., 2007)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 327, |
| "end": 350, |
| "text": "(Paul and Dredze, 2012;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 351, |
| "end": 375, |
| "text": "Eisenstein et al., 2011)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 387, |
| "end": 419, |
| "text": "Balasubramanyan and Cohen, 2013)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 450, |
| "end": 475, |
| "text": "(Blei and Lafferty, 2007;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 476, |
| "end": 498, |
| "text": "Li and McCallum, 2006)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 531, |
| "end": 558, |
| "text": "(Andrzejewski et al., 2009;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 617, |
| "end": 638, |
| "text": "(Ramage et al., 2009;", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 639, |
| "end": 664, |
| "text": "Mimno and McCallum, 2008)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "After describing the general form of the model, we show how SPRITE can be tailored to particular settings by describing a specific model for the applied task of jointly inferring topic hierarchies and perspective ( \u00a76). We experiment with this topic+perspective model on sets of political debates and online reviews ( \u00a77), and demonstrate that SPRITE learns desired structures while outperforming many baselines at predictive tasks.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Our model family generalizes latent Dirichlet allocation (LDA) (Blei et al., 2003b) . Under LDA, there are K topics, where a topic is a categorical distribution over V words parameterized by \u03c6 k . Each document has a categorical distribution over topics, parameterized by \u03b8 m for the mth document. Each observed word in a document is generated by drawing a topic z from \u03b8 m , then drawing the word from \u03c6 z . \u03b8 and \u03c6 have priors given by Dirichlet distributions.", |
| "cite_spans": [ |
| { |
| "start": 63, |
| "end": 83, |
| "text": "(Blei et al., 2003b)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Modeling with Structured Priors", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Our generalization adds structure to the generation of the Dirichlet parameters. The priors for these parameters are modeled as log-linear combinations of underlying components. Components are real-valued vectors of length equal to the vocabulary size V (for priors over word distributions) or length equal to the number of topics K (for priors over topic distributions).", |
| "cite_spans": [ |
| { |
| "start": 276, |
| "end": 290, |
| "text": "distributions)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Modeling with Structured Priors", |
| "sec_num": "2" |
| }, |
| { |
| "text": "For example, we might assume that topics about sports like baseball and football share a common prior -given by a component -with general words about sports. A fine-grained topic about steroid use in sports might be created by combining components about broader topics like sports, medicine, and crime. By modeling the priors as combinations of components that are shared across all topics, we can learn interesting connections between topics, where components provide an additional latent layer for corpus understanding.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Modeling with Structured Priors", |
| "sec_num": "2" |
| }, |
| { |
| "text": "As we'll show in the next section, by imposing certain requirements on which components feed into which topics (or documents), we can induce a variety of model structures. For example, if we want to model a topic hierarchy, we require that each topic depend on exactly one parent component. If we want to jointly model topic and ideology in a corpus of political documents ( \u00a76), we make topic priors a combination of one component from each of two groups: a topical component and an ideological component, resulting in ideology-specific topics like \"conservative economics\".", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Modeling with Structured Priors", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Components construct priors as follows. For the topic-specific word distributions \u03c6, there are C (\u03c6) topic components. The kth topic's prior over \u03c6 k is a weighted combination (with coefficient vector \u03b2 k ) of the C (\u03c6) components (where component c is denoted \u03c9 c ). For the document-specific topic distributions \u03b8, there are C (\u03b8) document components. The mth document's prior over \u03b8 m is a weighted combination (coefficients \u03b1 m ) of the C (\u03b8) components (where component c is denoted \u03b4 c ).", |
| "cite_spans": [ |
| { |
| "start": 97, |
| "end": 100, |
| "text": "(\u03c6)", |
| "ref_id": null |
| }, |
| { |
| "start": 216, |
| "end": 219, |
| "text": "(\u03c6)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Modeling with Structured Priors", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Once conditioned on these priors, the model is identical to LDA. The generative story is described in Figure 1 . We call this family of models SPRITE: Structured PRIor Topic modEls.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 102, |
| "end": 110, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Topic Modeling with Structured Priors", |
| "sec_num": "2" |
| }, |
| { |
| "text": "To illustrate the role that components can play, consider an example in which we are modeling research topics in a corpus of NLP abstracts (as we do in \u00a77.3). Consider three speech-related topics: signal processing, automatic speech recognition, and dialog systems. Conceptualized as a hierarchy, these topics might belong to a higher level category of spoken language processing. SPRITE allows the relationship between these three topics to be defined in two ways. One, we can model that these topics will all have words in common. This is handled by the topic components -these three topics could all draw from a common \"spoken lan-", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Modeling with Structured Priors", |
| "sec_num": "2" |
| }, |
| { |
| "text": "\u2022 Generate hyperparameters: \u03b1, \u03b2, \u03b4, \u03c9 ( \u00a73)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Modeling with Structured Priors", |
| "sec_num": "2" |
| }, |
| { |
| "text": "\u2022 For each document m, generate parameters:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Modeling with Structured Priors", |
| "sec_num": "2" |
| }, |
| { |
| "text": "1.\u03b8 mk = exp( C (\u03b8) c=1 \u03b1 mc \u03b4 ck ), 1\u2264k\u2264K 2. \u03b8 m \u223c Dirichlet(\u03b8 m )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Modeling with Structured Priors", |
| "sec_num": "2" |
| }, |
| { |
| "text": "\u2022 For each topic k, generate parameters:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Modeling with Structured Priors", |
| "sec_num": "2" |
| }, |
| { |
| "text": "1.\u03c6 kv = exp( C (\u03c6 ) c=1 \u03b2 kc \u03c9 cv ), 1\u2264v\u2264V 2. \u03c6 k \u223c Dirichlet(\u03c6 k )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Modeling with Structured Priors", |
| "sec_num": "2" |
| }, |
| { |
| "text": "\u2022 For each token (m, n), generate data:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Modeling with Structured Priors", |
| "sec_num": "2" |
| }, |
| { |
| "text": "1. Topic (unobserved): z m,n \u223c \u03b8 m 2. Word (observed): w m,n \u223c \u03c6 zm,n", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Modeling with Structured Priors", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Figure 1: The generative story of SPRITE. The difference from latent Dirichlet allocation (Blei et al., 2003b) is the generation of the Dirichlet parameters.", |
| "cite_spans": [ |
| { |
| "start": 90, |
| "end": 110, |
| "text": "(Blei et al., 2003b)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Modeling with Structured Priors", |
| "sec_num": "2" |
| }, |
| { |
| "text": "guage\" topic component, with high-weight words such as speech and spoken, which informs the prior of all three topics. Second, we can model that these topics are likely to occur together in documents. For example, articles about dialog systems are likely to discuss automatic speech recognition as a subroutine. This is handled by the document components -there could be a \"spoken language\" document component that gives high weight to all three topics, so that if a document draws its prior from this component, then it is more likely to give probability to these topics together.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Modeling with Structured Priors", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The next section will describe how particular priors over the coefficients can induce various structures such as hierarchies and factorizations, and components and coefficients can also be provided as input to incorporate supervision and prior knowledge. The general prior structure used in SPRITE can be used to represent a wide array of existing topic models, outlined in Section 4.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Modeling with Structured Priors", |
| "sec_num": "2" |
| }, |
| { |
| "text": "By changing the particular configuration of the hyperparameters -the component coefficients (\u03b1 and \u03b2) and the component weights (\u03b4 and \u03c9) -we obtain a diverse range of model structures and behaviors. We now describe possible structures and the corresponding priors.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Structures", |
| "sec_num": "3" |
| }, |
| { |
| "text": "This subsection discusses various graph structures that can describe the relation between topic components and topics, and between document components and documents, illustrated in Figure 2 . Example graph structures describing possible relations between components (middle row) and topics or documents (bottom row). Edges correspond to non-zero values for \u03b1 or \u03b2 (the component coefficients defining priors over the document and topic distributions). The root node is a shared prior over the component weights (with other possibilities discussed in \u00a73.3).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 181, |
| "end": 189, |
| "text": "Figure 2", |
| "ref_id": null |
| }, |
| { |
| "start": 192, |
| "end": 278, |
| "text": "Example graph structures describing possible relations between components (middle row)", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Component Structures", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The general SPRITE model can be thought of as a dense directed acyclic graph (DAG), where every document or topic is connected to every component with some weight \u03b1 or \u03b2. When many of the \u03b1 or \u03b2 coefficients are zero, the DAG becomes sparse. A sparse DAG has an intuitive interpretation: each document or topic depends on some subset of components. The default prior over coefficients that we use in this study is a 0-mean Gaussian distribution, which encourages the weights to be small. We note that to induce a sparse graph, one could use a 0-mean Laplace distribution as the prior over \u03b1 and \u03b2, which prefers parameters such that some components are zero.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Directed Acyclic Graph", |
| "sec_num": "3.1.1" |
| }, |
| { |
| "text": "When each document or topic has exactly one parent (one nonzero coefficient) we obtain a two-level tree structure. This structure naturally arises in topic hierarchies, for example, where fine-grained topics are children of coarse-grained topics.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tree", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "To create an (unweighted) tree, we require \u03b1 mc \u2208 {0, 1} and c \u03b1 mc = 1 for each document m. Similarly, \u03b2 kc \u2208 {0, 1} and c \u03b2 kc = 1 for each topic k. In this setting, \u03b1 m and \u03b2 k are indicator vectors which select a single component.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tree", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "In this study, rather than strictly requiring \u03b1 m and \u03b2 k to be binary-valued indicator vectors, we create a relaxation that allows for easier parameter estimation. We let \u03b1 m and \u03b2 k be real-valued variables in a simplex, but place a prior over their values to encourage sparse values, favoring vectors with a single component near 1 and others near 0. This is achieved using a Dirichlet(\u03c1 < 1) distribution as the prior over \u03b1 and \u03b2, which has higher density near the boundaries of the simplex. 1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tree", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "For a weighted tree, \u03b1 and \u03b2 could be a product of two variables: an \"integer-like\" indicator vector with sparse Dirichlet prior as suggested above, combined with a real-valued weight (e.g., with a Gaussian prior). We take this approach in our model of topic and perspective ( \u00a76).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tree", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "By using structured sparsity over the DAG, we can obtain a structure where components are grouped into G factors, and each document or topic has one parent from each group. Figure 2 (d) illustrates this: the left three components belong to one group, the right two belong to another, and each bottom node has exactly one parent from each. This is a DAG that we call a \"factored forest\" because the subgraphs associated with each group in isolation are trees. This structure arises in \"multidimensional\" models like SAGE (Eisenstein et al., 2011) and Factorial LDA (Paul and Dredze, 2012) , which allow tokens to be associated with multiple variables (e.g. a topic along with a variable denoting positive or negative sentiment). This allows word distributions to depend on both factors.", |
| "cite_spans": [ |
| { |
| "start": 520, |
| "end": 545, |
| "text": "(Eisenstein et al., 2011)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 564, |
| "end": 587, |
| "text": "(Paul and Dredze, 2012)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 173, |
| "end": 181, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Factored Forest", |
| "sec_num": "3.1.3" |
| }, |
| { |
| "text": "The \"exactly one parent\" indicator constraint is the same as in the tree structure but enforces a tree only within each group. This can therefore be (softly) modeled using a sparse Dirichlet prior as described in the previous subsection. In this case, the subsets of components belonging to each factor have separate sparse Dirichlet priors. Using the example from Figure 2(d) , the first three component indicators would come from one Dirichlet, while the latter two component indicators would come from a second.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 365, |
| "end": 376, |
| "text": "Figure 2(d)", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Factored Forest", |
| "sec_num": "3.1.3" |
| }, |
| { |
| "text": "A desirable property for many situations is for the topic and document components to correspond to each other. For example, if we think of the components as coarse-grained topics in a hierarchy, then the coefficients \u03b2 enforce that topic word distributions share a prior defined by their parent \u03c9 component, while the coefficients \u03b1 represent a document's proportions of coarse-grained topics, which affects the document's prior over child topics (through the \u03b4 vectors). Consider the example with spoken language topics in \u00a72: these three topics (signal processing, speech recognition, and dialog systems) are a priori likely both to share the same words and to occur together in documents. By tying these together, we ensure that the patterns are consistent across the two types of components, and the patterns from both types can reinforce each other during inference.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tying Topic and Document Components", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "In this case, the number of topic components is the same as the number of document components (C (\u03c6) = C (\u03b8) ), and the coefficients (\u03b2 cz ) of the topic components should correlate with the weights of the document components (\u03b4 zc ). The approach we take ( \u00a76) is to define \u03b4 and \u03b2 as a product of two variables (suggested in \u00a73.1.2): a binary mask variable (with sparse Dirichlet prior), which we let be identical for both \u03b4 and \u03b2, and a real-valued positive weight.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tying Topic and Document Components", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "As for priors over the component weights \u03b4 and \u03c9, we assume they are generated from a 0-mean Gaussian. While not experimented with in this study, it is also possible to allow the components themselves to have rich priors which are functions of higher level components. For example, rather than assuming a mean of zero, the mean could be a weighted combination of higher level weight vectors. This approach was used by in Factorial LDA, in which each \u03c9 component had its own Gaussian prior provided as input to guide the parameters.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deep Components", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "We now describe several existing Dirichlet prior topic models and show how they are special cases of SPRITE. Table 1 summarizes these models and their relation to SPRITE. In almost every case, we also describe how the SPRITE representation of the model offers improvements over the original model or can lead to novel extensions. Table 1 : Topic models with Dirichlet priors that are generalized by SPRITE. The description of each model can be found in the noted section number. PAM is not equivalent, but captures very similar behavior. The described component formulations of SCTM and SAGE are equivalent, but these differ from SPRITE in that the components directly define the parameters, rather than priors over the parameters.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 109, |
| "end": 116, |
| "text": "Table 1", |
| "ref_id": null |
| }, |
| { |
| "start": 330, |
| "end": 337, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Special Cases and Extensions", |
| "sec_num": "4" |
| }, |
| { |
| "text": "In LDA (Blei et al., 2003b) , all \u03b8 vectors are drawn from the same prior, as are all \u03c6 vectors. This is a basic instance of our model with only one component at the topic and document levels, C (\u03b8) = C (\u03c6) = 1, with coefficients \u03b1 = \u03b2 = 1.", |
| "cite_spans": [ |
| { |
| "start": 7, |
| "end": 27, |
| "text": "(Blei et al., 2003b)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Latent Dirichlet Allocation", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Shared components topic models (SCTM) (Gormley et al., 2010) define topics as products of \"components\", where components are word distributions. To use the notation of our paper, the kth topic's word distribution in SCTM is parameterized by \u03c6 kv \u221d c \u03c9 \u03b2 kc cv , where the \u03c9 vectors are word distributions (rather than vectors in R V ), and the \u03b2 kc \u2208 {0, 1} variables are indicators denoting whether component c is in topic k. This is closely related to SPRITE, where topics also depend on products of underlying components. A major difference is that in SCTM, the topic-specific word distributions are exactly defined as a product of components, whereas in SPRITE, it is only the prior that is a product of components. 2 Another difference is that SCTM has an unweighted product of components (\u03b2 is binary), whereas SPRITE allows for weighted products. The log-linear parameterization leads to simpler optimization procedures than the product parameterization. Finally, the components in SCTM only apply to the word distributions, and not the topic distributions in documents.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Shared Components Topic Models", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Factored topic models combine multiple aspects of the text to generate the document (instead of just topics). One such topic model is Factorial LDA (FLDA) (Paul and Dredze, 2012) . In FLDA, \"topics\" are actually tuples of potentially multiple variables, such as aspect and sentiment in online reviews . Each document distribution \u03b8 m is a distribution over pairs (or higherdimensional tuples if there are more than two factors), and each pair (j, k) has a word distribution \u03c6 (j,k) . FLDA uses a similar log-linear parameterization of the Dirichlet priors as SPRITE. Using our notation, the Dirichlet(\u03c6 (j,k) ) prior for", |
| "cite_spans": [ |
| { |
| "start": 155, |
| "end": 178, |
| "text": "(Paul and Dredze, 2012)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Factored Topic Models", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "\u03c6 (j,k) is defined as\u03c6 (j,k),v =exp(\u03c9 jv +\u03c9 kv ),", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Factored Topic Models", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "where \u03c9 j is a weight vector over the vocabulary for the jth component of the first factor, and \u03c9 k encodes the weights for the kth component of the second factor. (Some bias terms are omitted for simplicity.) The prior over \u03b8 m has a similar form:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Factored Topic Models", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "\u03b8 m,(j,k) =exp(\u03b1 mj + \u03b1 mk )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Factored Topic Models", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": ", where \u03b1 mj is document m's preference for component j of the first factor (and likewise for k of the second).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Factored Topic Models", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "This corresponds to an instantiation of SPRITE using an unweighted factored forest ( \u00a73.1.3), where \u03b2 zc = \u03b4 cz ( \u00a73.2, recall that \u03b4 are document components while \u03b2 are the topic coefficients). Each subtopic z (which is a pair of variables in the two-factor model) has one parent component from each factor, indicated by \u03b2 z which is binaryvalued. At the document level in the two-factor example, \u03b4 j is an indicator vector with values of 1 for all pairs with j as the first component, and thus the coefficient \u03b1 mj controls the prior for all such pairs of the form (j, \u2022), and likewise \u03b4 k indicates pairs with k as the second component, controlling the prior over (\u2022, k).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Factored Topic Models", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "The SPRITE representation offers a benefit over the original FLDA model. FLDA assumes that the entire Cartesian product of the different factors is represented in the model (e.g. \u03c6 parameters for every possible tuple), which leads to issues with efficiency and overparameterization with higher numbers of factors. With SPRITE, we can simply fix the number of \"topics\" to a number smaller than the size of the Cartesian product, and the model will learn which subset of tuples are included, through the values of \u03b2 and \u03b4.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Factored Topic Models", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Finally, another existing model family that allows for topic factorization is the sparse additive generative model (SAGE) (Eisenstein et al., 2011) . SAGE uses a log-linear parameterization to define word distributions. SAGE is a general family of models that need not be factored, but is presented as an efficient solution for including multiple factors, such as topic and geography or topic and au-thor ideology. Like SCTM, \u03c6 is exactly defined as a product of \u03c9 weights, rather than our approach of using the product to define a prior over \u03c6.", |
| "cite_spans": [ |
| { |
| "start": 122, |
| "end": 147, |
| "text": "(Eisenstein et al., 2011)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Factored Topic Models", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "While the two previous subsections primarily focused on word distributions (with FLDA being an exception that focused on both), SPRITE's priors over topic distributions also have useful characteristics. The component-specific \u03b4 vectors can be interpreted as common topic distribution patterns, where each component is likely to give high weight to groups of topics that tend to occur together. Each document's \u03b1 weights encode which of the topic groups are present in that document.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Hierarchies and Correlations", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Similar properties are captured by the Pachinko allocation model (PAM) (Li and McCallum, 2006) . Under PAM, each document has a distribution over supertopics. Each supertopic is associated with a Dirichlet prior over subtopic distributions, where subtopics are the low level topics that are associated with word parameters \u03c6. Documents also have supertopic-specific distributions over subtopics (drawn from each supertopic-specific Dirichlet prior). Each topic in a document is drawn by first drawing a supertopic from the document's distribution, then drawing a subtopic from that supertopic's document distribution.", |
| "cite_spans": [ |
| { |
| "start": 71, |
| "end": 94, |
| "text": "(Li and McCallum, 2006)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Hierarchies and Correlations", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "While not equivalent, this is quite similar to SPRITE where document components correspond to supertopics. Each document's \u03b1 weights can be interpreted to be similar to a distribution over supertopics, and each \u03b4 vector is that supertopic's contribution to the prior over subtopics. The prior over the document's topic distribution is thus affected by the document's supertopic weights \u03b1.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Hierarchies and Correlations", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "The SPRITE formulation naturally allows for powerful extensions to PAM. One possibility is to include topic components for the word distributions, in addition to document components, and to tie together \u03b4 cz and \u03b2 zc ( \u00a73.2). This models the intuitive characteristic that subtopics belonging to similar supertopics (encoded by \u03b4) should come from similar priors over their word distributions (since they will have similar \u03b2 values). That is, children of a supertopic are topically related -they are likely to share words. This is a richer alternative to the hierarchical variant of PAM proposed by Mimno et al. (2007) , which modeled separate word distributions for supertopics and subtopics, but the subtopics were not dependent on the super-topic word distributions. Another extension is to form a strict tree structure, making each subtopic belong to exactly one supertopic: a true hierarchy.", |
| "cite_spans": [ |
| { |
| "start": 598, |
| "end": 617, |
| "text": "Mimno et al. (2007)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Hierarchies and Correlations", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "SPRITE also naturally provides the ability to condition document topic distributions on features of the document, such as a user rating in a review. To do this, let the number of document components be the number of features, and the value of \u03b1 mc is the mth document's value of the cth feature. The \u03b4 vectors then influence the document's topic prior based on the feature values. For example, increasing \u03b1 mc will increase the prior for topic z if \u03b4 cz is positive and decrease the prior if \u03b4 cz is negative. This is similar to the structure used for PAM ( \u00a74.4), but here the \u03b1 weights are fixed and provided as input, rather than learned and interpreted as supertopic weights. This is identical to the Dirichlet-multinomial regression (DMR) topic model (Mimno and McCallum, 2008) . The DMR topic model define's each document's Dirichlet prior over topics as a log-linear function of the document's feature values and regression coefficients for each topic. The cth feature's regression coefficients correspond to the \u03b4 c vector in SPRITE.", |
| "cite_spans": [ |
| { |
| "start": 756, |
| "end": 782, |
| "text": "(Mimno and McCallum, 2008)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conditioning on Document Attributes", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "We now discuss how to infer the posterior of the latent variables z and parameters \u03b8 and \u03c6, and find maximum a posteriori (MAP) estimates of the hyperparameters \u03b1, \u03b2, \u03b4, and \u03c9, given their hyperpriors. We take a Monte Carlo EM approach, using a collapsed Gibbs sampler to sample from the posterior of the topic assignments z conditioned on the hyperparameters, then optimizing the hyperparameters using gradient-based optimization conditioned on the samples.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inference and Parameter Estimation", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Given the hyperparameters, the sampling equations are identical to the standard LDA sampler (Griffiths and Steyvers, 2004) . The partial derivative of the collapsed log likelihood L of the corpus with respect to each hyperparameter \u03b2 kc is:", |
| "cite_spans": [ |
| { |
| "start": 92, |
| "end": 122, |
| "text": "(Griffiths and Steyvers, 2004)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inference and Parameter Estimation", |
| "sec_num": "5" |
| }, |
| { |
| "text": "\u2202L \u2202\u03b2 kc = \u2202P (\u03b2) \u2202\u03b2 kc + v \u03c9 cv\u03c6kv \u00d7 (1) \u03a8(n k v +\u03c6 kv ) \u2212\u03a8(\u03c6 kv ) +\u03a8( k \u03c6 k v ) \u2212\u03a8( k n k v +\u03c6 k v ) where\u03c6 kv =exp( c \u03b2 kc \u03c9 c v ), n k v", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inference and Parameter Estimation", |
| "sec_num": "5" |
| }, |
| { |
| "text": "is the number of times word v is assigned to topic k (in the samples from the E-step), and \u03a8 is the digamma function, the derivative of the log of the gamma function. The digamma terms arise from the Dirichlet-multinomial distribution, when integrating out the parameters \u03c6. P (\u03b2) is the hyperprior. For a 0-mean Gaussian hyperprior with variance \u03c3 2 , \u2202P (\u03b2) \u2202\u03b2 kc = \u2212 \u03b2 kc \u03c3 2 . Under a Dirchlet(\u03c1) hyperprior, when we want \u03b2 to represent an indicator vector ( \u00a73.1.2), \u2202P (\u03b2) \u2202\u03b2 kc = \u03c1\u22121 \u03b2 kc . The partial derivatives for the other hyperparameters are similar. Rather than involving a sum over the vocabulary, \u2202L \u2202\u03b4 ck sums over documents, while \u2202L \u2202\u03c9cv and \u2202L \u2202\u03b1mc sum over topics. Our inference algorithm alternates between one Gibbs iteration and one iteration of gradient ascent, so that the parameters change gradually. For unconstrained parameters, we use the update rule:", |
| "cite_spans": [ |
| { |
| "start": 353, |
| "end": 359, |
| "text": "\u2202P (\u03b2)", |
| "ref_id": null |
| }, |
| { |
| "start": 472, |
| "end": 478, |
| "text": "\u2202P (\u03b2)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inference and Parameter Estimation", |
| "sec_num": "5" |
| }, |
| { |
| "text": "x t+1 =x t + \u03b7 t \u2207L(x t )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inference and Parameter Estimation", |
| "sec_num": "5" |
| }, |
| { |
| "text": ", for some variable x and a step size \u03b7 t at iteration t. For parameters constrained to the simplex (such as when \u03b2 is a soft indicator vector), we use exponentiated gradient ascent (Kivinen and Warmuth, 1997) with the update rule:", |
| "cite_spans": [ |
| { |
| "start": 182, |
| "end": 209, |
| "text": "(Kivinen and Warmuth, 1997)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inference and Parameter Estimation", |
| "sec_num": "5" |
| }, |
| { |
| "text": "x t+1 i \u221d x t i exp(\u03b7 t \u2207 i L(x t ))", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inference and Parameter Estimation", |
| "sec_num": "5" |
| }, |
| { |
| "text": ".", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inference and Parameter Estimation", |
| "sec_num": "5" |
| }, |
| { |
| "text": "For variables that we prefer to be binary but have softened to continuous variables using sparse Beta or Dirichlet priors, we can straightforwardly strengthen the preference to be binary by modifying the objective function to favor the prior more heavily. Specifically, under a Dirichlet(\u03c1<1) prior we will introduce a scaling parameter \u03c4 t \u2265 1 to the prior log likelihood: \u03c4 t log P (\u03b2) with partial derivative \u03c4 t \u03c1\u22121 \u03b2 kc , which adds extra weight to the sparse Dirichlet prior in the objective. The algorithm used in our experiments begins with \u03c4 1 = 1 and optionally increases \u03c4 over time. This is a deterministic annealing approach, where \u03c4 corresponds to an inverse temperature (Ueda and Nakano, 1998; Smith and Eisner, 2006) .", |
| "cite_spans": [ |
| { |
| "start": 685, |
| "end": 708, |
| "text": "(Ueda and Nakano, 1998;", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 709, |
| "end": 732, |
| "text": "Smith and Eisner, 2006)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tightening the Constraints", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "As \u03c4 approaches infinity, the prior-annealed MAP objective max \u03b2 P (\u03c6|\u03b2)P (\u03b2) \u03c4 approaches max \u03b2 P (\u03c6|\u03b2) max \u03b2 P (\u03b2). Annealing only the prior P (\u03b2) results in maximization of this term only, while the outer max chooses a good \u03b2 under P (\u03c6|\u03b2) as a tie-breaker among all \u03b2 values that maximize the inner max (binary-valued \u03b2). 3 We show experimentally ( \u00a77.2.2) that annealing the prior yields values that satisfy the constraints.", |
| "cite_spans": [ |
| { |
| "start": 326, |
| "end": 327, |
| "text": "3", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tightening the Constraints", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "We will now describe a SPRITE model that encompasses nearly all of the structures and extensions described in \u00a73-4, followed by experimental results using this model to jointly capture topic and \"perspective\" in a corpus of political debates (where perspective corresponds to ideology) and a corpus of online doctor reviews (where perspective corresponds to the review sentiment). First, we will create a topic hierarchy ( \u00a74.4). The hierarchy will model both topics and documents, where \u03b1 m is document m's supertopic proportions, \u03b4 c is the cth supertopic's subtopic prior, \u03c9 c is the cth supertopic's word prior, and \u03b2 k is the weight vector that selects the kth topic's parent supertopic, which incorporates (soft) indicator vectors to encode a tree structure ( \u00a73.1.2).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A Factored Hierarchical Model of Topic and Perspective", |
| "sec_num": "6" |
| }, |
| { |
| "text": "We want a weighted tree; while each \u03b2 k has only one nonzero element, the nonzero element can be a value other than 1. We do this by replacing the single coefficient \u03b2 kc with a product of two variables: b kc\u03b2kc . Here,\u03b2 k is a real-valued weight vector, while b kc is a binary indicator vector which zeroes out all but one element of \u03b2 k . We do the same with the \u03b4 vectors, replacing \u03b4 ck with b kc\u03b4ck . The b variables are shared across both topic and document components, which is how we tie these together ( \u00a73.2). We relax the binary requirement and instead allow a positive real-valued vector whose elements sum to 1, with a Dirichlet(\u03c1<1) prior to encourage sparsity ( \u00a73.1.2).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A Factored Hierarchical Model of Topic and Perspective", |
| "sec_num": "6" |
| }, |
| { |
| "text": "To be properly interpreted as a hierarchy, we constrain the coefficients \u03b1 and \u03b2 (and by extension, \u03b4) to be positive. To optimize these parameters in a mathematically convenient way, we write \u03b2 kc as exp(log \u03b2 kc ), and instead optimize log \u03b2 kc \u2208 R rather than \u03b2 kc \u2208 R + .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A Factored Hierarchical Model of Topic and Perspective", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Second, we factorize ( \u00a74.3) our hierarchy such that each topic depends not only on its supertopic, but also on a value indicating perspective. For example, a conservative topic about energy will appear differently from a liberal topic about energy. The prior for a topic will be a log-linear combination of both a supertopic (e.g. energy) and a perspective (e.g. liberal) weight vector. The variables associated with the perspective component are denoted with superscript (P ) rather than subscript c.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A Factored Hierarchical Model of Topic and Perspective", |
| "sec_num": "6" |
| }, |
| { |
| "text": "To learn meaningful perspective parameters, we include supervision in the form of document attributes ( \u00a74.5). Each document includes a pos-", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A Factored Hierarchical Model of Topic and Perspective", |
| "sec_num": "6" |
| }, |
| { |
| "text": "\u2022 b k \u223c Dirichlet(\u03c1 < 1) (soft indicator)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A Factored Hierarchical Model of Topic and Perspective", |
| "sec_num": "6" |
| }, |
| { |
| "text": "\u2022 \u03b1 (P ) is given as input (perspective value)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A Factored Hierarchical Model of Topic and Perspective", |
| "sec_num": "6" |
| }, |
| { |
| "text": "\u2022 \u03b4 (P ) k = \u03b2 (P ) k \u2022\u03c6 kv = exp(\u03c9 (B) v + \u03b2 (P ) k \u03c9 (P ) v + c b kc\u03b2kc \u03c9 cv ) \u2022\u03b8 mk = exp(\u03b4 (B) k + \u03b1 (P ) m \u03b4 (P ) k + c b kc \u03b1 mc\u03b4ck )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A Factored Hierarchical Model of Topic and Perspective", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Figure 3: Summary of the hyperparameters in our SPRITEbased topic and perspective model ( \u00a76).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A Factored Hierarchical Model of Topic and Perspective", |
| "sec_num": "6" |
| }, |
| { |
| "text": "itive or negative score denoting the perspective, which is the variable \u03b1 (P ) m for document m. Since \u03b1 (P ) are the coefficients for \u03b4 (P ) , positive values of \u03b4 (P ) k indicate that topic k is more likely if the author is conservative (which has a positive \u03b1 score in our data), and less likely if the author is liberal (which has a negative score). There is only a single perspective component, but it represents two ends of a spectrum with positive and negative weights; \u03b2 (P ) and \u03b4 (P ) are not constrained to be positive, unlike the supertopics. We also set \u03b2", |
| "cite_spans": [ |
| { |
| "start": 105, |
| "end": 109, |
| "text": "(P )", |
| "ref_id": null |
| }, |
| { |
| "start": 137, |
| "end": 141, |
| "text": "(P )", |
| "ref_id": null |
| }, |
| { |
| "start": 165, |
| "end": 169, |
| "text": "(P )", |
| "ref_id": null |
| }, |
| { |
| "start": 490, |
| "end": 494, |
| "text": "(P )", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A Factored Hierarchical Model of Topic and Perspective", |
| "sec_num": "6" |
| }, |
| { |
| "text": "(P ) k = \u03b4 (P )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A Factored Hierarchical Model of Topic and Perspective", |
| "sec_num": "6" |
| }, |
| { |
| "text": "k . This means that topics with positive \u03b4 (P ) k will also have a positive \u03b2 coefficient that is multiplied with the perspective word vector \u03c9 (P ) .", |
| "cite_spans": [ |
| { |
| "start": 144, |
| "end": 148, |
| "text": "(P )", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A Factored Hierarchical Model of Topic and Perspective", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Finally, we include \"bias\" component vectors denoted \u03c9 (B) and \u03b4 (B) , which act as overall weights over the vocabulary and topics, so that the component-specific \u03c9 and \u03b4 weights can be interpreted as deviations from the global bias weights. Figure 3 summarizes the model. This includes most of the features described above (trees, factored structures, tying topic and document components, and document attributes), so we can ablate model features to measure their effect.", |
| "cite_spans": [ |
| { |
| "start": 55, |
| "end": 58, |
| "text": "(B)", |
| "ref_id": null |
| }, |
| { |
| "start": 65, |
| "end": 68, |
| "text": "(B)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 242, |
| "end": 250, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "A Factored Hierarchical Model of Topic and Perspective", |
| "sec_num": "6" |
| }, |
| { |
| "text": "We applied our models to two corpora:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets and Experimental Setup", |
| "sec_num": "7.1" |
| }, |
| { |
| "text": "\u2022 Debates: A set of floor debates from the 109th-112th U.S. Congress, collected by Nguyen et al. (2013) , who also applied a hierarchical topic model to this data. Each document is a transcript of one speaker's turn in a debate, and each document includes the first dimension of the DW-NOMINATE score (Lewis and Poole, 2004) , a real-valued score indicating how conservative (positive) or liberal (negative) the speaker is. This value is \u03b1 (P ) . We took a sample of 5,000 documents from the House debates (850,374 tokens; 7,426 types), balanced across party affilia-tion. We sampled from the most partisan speakers, removing scores below the median value.", |
| "cite_spans": [ |
| { |
| "start": 83, |
| "end": 103, |
| "text": "Nguyen et al. (2013)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 301, |
| "end": 324, |
| "text": "(Lewis and Poole, 2004)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 440, |
| "end": 444, |
| "text": "(P )", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets and Experimental Setup", |
| "sec_num": "7.1" |
| }, |
| { |
| "text": "\u2022 Reviews: Doctor reviews from RateMDs.com, previously analyzed using FLDA Wallace et al., 2014) . The reviews contain ratings on a 1-5 scale for multiple aspects. We centered the ratings around the middle value 3, then took reviews that had the same sign for all aspects, and averaged the scores to produce a value for \u03b1 (P ) . Our corpus contains 20,000 documents (476,991 tokens; 10,158 types), balanced across positive/negative scores.", |
| "cite_spans": [ |
| { |
| "start": 75, |
| "end": 96, |
| "text": "Wallace et al., 2014)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 322, |
| "end": 326, |
| "text": "(P )", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets and Experimental Setup", |
| "sec_num": "7.1" |
| }, |
| { |
| "text": "Unless otherwise specified, K=50 topics and C=10 components (excluding the perspective component) for Debates, and K=20 and C=5 for Reviews. These values were chosen as a qualitative preference, not optimized for predictive performance, but we experiment with different values in \u00a77.2.2. We set the step size \u03b7 t according to Ada-Grad (Duchi et al., 2011) , where the step size is the inverse of the sum of squared historical gradients. 4 We place a sparse Dirichlet(\u03c1=0.01) prior on the b variables, and apply weak regularization to all other hyperparameters via a N (0, 10 2 ) prior. These hyperparameters were chosen after only minimal tuning, and were selected because they showed stable and reasonable output qualitatively during preliminary development.", |
| "cite_spans": [ |
| { |
| "start": 335, |
| "end": 355, |
| "text": "(Duchi et al., 2011)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 437, |
| "end": 438, |
| "text": "4", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets and Experimental Setup", |
| "sec_num": "7.1" |
| }, |
| { |
| "text": "We ran our inference algorithm for 5000 iterations, estimating the parameters \u03b8 and \u03c6 by averaging the final 100 iterations. Our results are averaged across 10 randomly initialized samplers. 5 Figure 4 shows examples of topics learned from the Reviews corpus. The figure includes the highest probability words in various topics as well as the highest weight words in the supertopic components and perspective component, which feed into the priors over the topic parameters. We see that one supertopic includes many words related to surgery, such as procedure and performed, and has multiple children, including a topic about dental work. Another supertopic includes words describing family members such as kids and husband.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 193, |
| "end": 201, |
| "text": "Figure 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Datasets and Experimental Setup", |
| "sec_num": "7.1" |
| }, |
| { |
| "text": "One topic has both supertopics as parents, which appears to describe surgeries that saved a family member's life, with top words including {saved, life, husband, cancer}. The figure also illustrates which topics are associated more with positive or negative reviews, as indicated by the value of \u03b4 (P ) .", |
| "cite_spans": [ |
| { |
| "start": 298, |
| "end": 302, |
| "text": "(P )", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis of Output", |
| "sec_num": "7.2.1" |
| }, |
| { |
| "text": "Interpretable parameters were also learned from the Debates corpus. Consider two topics about energy that have polar values of \u03b4 (P ) . The conservative-leaning topic is about oil and gas, with top words including {oil, gas, companies, prices, drilling}. The liberal-leaning topic is about renewable energy, with top words including {energy, new, technology, future, renewable}. Both of these topics share a common parent of an industry-related supertopic whose top words are {industry, companies, market, price}. A nonpartisan topic under this same supertopic has top words {credit, financial, loan, mortgage, loans}.", |
| "cite_spans": [ |
| { |
| "start": 129, |
| "end": 133, |
| "text": "(P )", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis of Output", |
| "sec_num": "7.2.1" |
| }, |
| { |
| "text": "We evaluated the model on two predictive tasks as well as topic quality. The first metric is perplexity of held-out text. The held-out set is based on tokens rather than documents: we trained on even numbered tokens and tested on odd tokens. This is a type of \"document completion\" evaluation (Wallach et al., 2009b) which measures how well the model can predict held-out tokens of a document after observing only some.", |
| "cite_spans": [ |
| { |
| "start": 293, |
| "end": 316, |
| "text": "(Wallach et al., 2009b)", |
| "ref_id": "BIBREF30" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Quantitative Evaluation", |
| "sec_num": "7.2.2" |
| }, |
| { |
| "text": "We also evaluated how well the model can predict the attribute value (DW-NOMINATE score or user rating) of the document. We trained a linear regression model using the document topic distributions \u03b8 as features. We held out half of the documents for testing and measured the mean absolute error. When estimating document-specific SPRITE parameters for held-out documents, we fix the feature value \u03b1 (P ) m = 0 for that document. These predictive experiments do not directly measure performance at many of the particular tasks that topic models are well suited for, like data exploration, summarization, and visualization. We therefore also include a metric that more directly measures the quality and interpretability of topics. We use the topic coherence metric introduced by , which is based on co-occurrence statistics among each topic's most probable words and has been shown to correlate with human judgments of topic quality. This metric measures the quality of each topic, and we Figure 4 : Examples of topics (gray boxes) and components (colored boxes) learned on the Reviews corpus with 20 topics and 5 components. Words with the highest and lowest values of \u03c9 (P ) , the perspective component, are shown on the left, reflecting positive and negative sentiment words. The words with largest \u03c9 values in two supertopic components are also shown, with manually given labels. Arrows from components to topics indicate that the topic's word distribution draws from that component in its prior (with non-zero \u03b2 value). There are also implicit arrows from the perspective component to all topics (omitted for clarity). The vertical positions of topics reflect the topic's perspective value \u03b4 (P ) . Topics centered above the middle line are more likely to occur in reviews with positive scores, while topics below the middle line are more likely in negative reviews. 
Note that this is a \"soft\" hierarchy because the tree structure is not strictly enforced, so some topics have multiple parent components. Table 3 shows how strict trees can be learned by tuning the annealing parameter.", |
| "cite_spans": [ |
| { |
| "start": 1170, |
| "end": 1174, |
| "text": "(P )", |
| "ref_id": null |
| }, |
| { |
| "start": 1695, |
| "end": 1699, |
| "text": "(P )", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 987, |
| "end": 995, |
| "text": "Figure 4", |
| "ref_id": null |
| }, |
| { |
| "start": 2008, |
| "end": 2015, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Quantitative Evaluation", |
| "sec_num": "7.2.2" |
| }, |
| { |
| "text": "measure the average coherence across all topics:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Quantitative Evaluation", |
| "sec_num": "7.2.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "1 K K k=1 M m=2 m\u22121 l=1 log DF (v km , v kl ) + 1 DF (v kl )", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Quantitative Evaluation", |
| "sec_num": "7.2.2" |
| }, |
| { |
| "text": "where DF (v, w) is the document frequency of words v and w (the number of documents in which they both occur), DF (v) is the document frequency of word v, and v ki is the ith most probable word in topic k. We use the top M = 20 words. This metric is limited to measuring only the quality of word clusters, ignoring the potentially improved interpretability of organizing the data into certain structures. However, it is still useful as an alternative measure of performance and utility, independent of the models' predictive abilities. Using these three metrics, we compared to several variants (denoted in bold) of the full model to understand how the different parts of the model affect performance:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Quantitative Evaluation", |
| "sec_num": "7.2.2" |
| }, |
| { |
| "text": "\u2022 Variants that contain the hierarchy components but not the perspective component (Hierarchy only), and vice versa (Perspective only).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Quantitative Evaluation", |
| "sec_num": "7.2.2" |
| }, |
| { |
| "text": "\u2022 The \"hierarchy only\" model using only document components \u03b4 and no topic components. This is a PAM-style model because it exhibits similar behavior to PAM ( \u00a74.4). We also compared to the original PAM model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Quantitative Evaluation", |
| "sec_num": "7.2.2" |
| }, |
| { |
| "text": "\u2022 The \"hierarchy only\" model using only topic components \u03c9 and no document components. This is a SCTM-style model because it exhibits similar behavior to SCTM ( \u00a74.2).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Quantitative Evaluation", |
| "sec_num": "7.2.2" |
| }, |
| { |
| "text": "\u2022 The full model where \u03b1 (P ) is learned rather than given as input. This is a FLDA-style model that has similar behavior to FLDA ( \u00a74.3). We also compared to the original FLDA model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Quantitative Evaluation", |
| "sec_num": "7.2.2" |
| }, |
| { |
| "text": "\u2022 The \"perspective only\" model but without the \u03c9 (P ) topic component, so the attribute value affects only the topic distributions and not the word distributions. This is identical to the DMR model of Mimno and McCallum (2008) ( \u00a74.5 ).", |
| "cite_spans": [ |
| { |
| "start": 201, |
| "end": 233, |
| "text": "Mimno and McCallum (2008) ( \u00a74.5", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Quantitative Evaluation", |
| "sec_num": "7.2.2" |
| }, |
| { |
| "text": "\u2022 A model with no components except for the bias vectors \u03c9 (B) and \u03b4 (B) . This is equivalent to LDA with optimized hyperparameters (learned). We also experimented with using fixed symmetric hyperparameters, using values suggested by Griffiths and Steyvers (2004) : 50/K and 0.01 for topic and word distributions.", |
| "cite_spans": [ |
| { |
| "start": 69, |
| "end": 72, |
| "text": "(B)", |
| "ref_id": null |
| }, |
| { |
| "start": 234, |
| "end": 263, |
| "text": "Griffiths and Steyvers (2004)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Quantitative Evaluation", |
| "sec_num": "7.2.2" |
| }, |
| { |
| "text": "To put the results in context, we also compare to two types of baselines: (1) \"bag of words\" baselines, where we measure the perplexity of add-one smoothed unigram language models, we measure the prediction error using bag of words features, and we measure coherence of the unigram distribution; (2) naive baselines, where we measure the perplexity of the uniform distribution over each dataset's vocabulary, the prediction error when simply predicting each attribute as the mean value in the training set, and the coherence of 20 randomly selected words (repeated for 10 trials). Table 2 shows that the full SPRITE model substantially outperforms the LDA baseline at both predictive tasks. Generally, model variants with more structure perform better predictively.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 581, |
| "end": 588, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Quantitative Evaluation", |
| "sec_num": "7.2.2" |
| }, |
| { |
| "text": "The difference between SCTM-style and PAM-style is that the former uses only topic components (for word distributions) and the latter uses only document components (for the topic distributions). Results show that the structured priors are more important for topic than word distributions, since PAM-style has lower perplexity on both datasets. However, models with both topic and document components generally outperform either alone, including comparing the Perspective only and DMR models. The former includes both topic and document perspective components, while DMR has only a document level component.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Quantitative Evaluation", |
| "sec_num": "7.2.2" |
| }, |
| { |
| "text": "PAM does not significantly outperform optimized LDA in most measures, likely because it updates the hyperparameters using a moment-based approximation, which is less accurate than our gradient-based optimization. FLDA perplexity is 2.3% higher than optimized LDA on Reviews, comparable to the 4% reported by Paul and Dredze (2012) on a different corpus. The FLDA-style SPRITE variant, which is more flexible, significantly outperforms FLDA in most measures.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Quantitative Evaluation", |
| "sec_num": "7.2.2" |
| }, |
| { |
| "text": "The results are quite different under the coherence metric. It seems that topic components (which influence the word distributions) improve coherence over LDA, while document components worsen coherence. SCTM-style (which uses only topic components) does the best in both datasets, while PAM-style (which uses only documents) does the worst. PAM also significantly improves over LDA, despite worse perplexity.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Quantitative Evaluation", |
| "sec_num": "7.2.2" |
| }, |
| { |
| "text": "The LDA (learned) baseline substantially outperforms LDA (fixed) in all cases, highlighting the importance of optimizing hyperparameters, consistent with prior research (Wallach et al., 2009a) .", |
| "cite_spans": [ |
| { |
| "start": 169, |
| "end": 192, |
| "text": "(Wallach et al., 2009a)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Quantitative Evaluation", |
| "sec_num": "7.2.2" |
| }, |
| { |
| "text": "Surprisingly, many SPRITE variants also outperform the bag of words regression baseline, even though the latter was tuned to optimize performance using heavy \u21132 regularization, which we applied only weakly (without tuning) to the topic model features. We also point out that the \"bag of words\" version of the coherence metric (the coherence of the top 20 words) is higher than the average topic coherence, which is an artifact of how the metric is defined: the most probable words in the corpus also tend to co-occur together in most documents, so these words are considered to be highly coherent when grouped together.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Quantitative Evaluation", |
| "sec_num": "7.2.2" |
| }, |
| { |
| "text": "We evaluated the full model at the two predictive tasks with varying numbers of topics ({12,25,50,100} for Debates and {5,10,20,40} for Reviews) and components ({2,5,10,20}). Figure 5 shows that performance is more sensitive to the number of topics than components, with generally less variance among the latter. More topics improve performance monotonically on Debates, while performance declines at 40 topics on Reviews. The middle range of components (5-10) tends to perform better than too few (2) or too many (20) components.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 175, |
| "end": 183, |
| "text": "Figure 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Parameter Sensitivity", |
| "sec_num": null |
| }, |
| { |
| "text": "Regardless of quantitative differences, the choice of parameters may depend on the end application and the particular structures that the user has in mind, if interpretability is important. For example, if the topic model is used as a visualization tool, then 2 components would not likely result in an interesting hierarchy to the user, even if this setting produces low perplexity.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parameter Sensitivity", |
| "sec_num": null |
| }, |
| { |
| "text": "We use a relaxation of the binary b that induces a \"soft\" tree structure. Table 3 shows the percentage of b values which are within \u03b5 = .001 of 0 or 1 under various annealing schedules, increasing the inverse temperature \u03c4 by 0.1% after each iteration (i.e. \u03c4 t = 1.001 t ) as well as 0.3% and no annealing at all (\u03c4 = 1). At \u03c4 = 0, we model a DAG rather than a tree, because the model has no preference that b is sparse. Many of the values are binary in the DAG case, but the sparse prior substantially increases the number of binary values, obtaining fully binary structures with sufficient annealing. We compare the DAG and tree structures more in the next subsection.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 74, |
| "end": 81, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Structured Sparsity", |
| "sec_num": null |
| }, |
| { |
| "text": "The previous subsection experimented with models that included a variety of structures, but did not provide a comparison of each structure in isolation, since most model variants were part of a complex joint model. In this section, we experiment with the basic SPRITE model for the three structures described in \u00a73: a DAG, a tree, and a factored forest. For each structure, we also experiment with each type of component: document, topic, and both types (combined). For this set of experiments, we included a third dataset that does not contain a perspective value:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Structure Comparison", |
| "sec_num": "7.3" |
| }, |
| { |
| "text": "\u2022 Abstracts: A set of 957 abstracts from the ACL anthology (97,168 tokens; 8,246 types). These abstracts have previously been analyzed with FLDA (Paul and Dredze, 2012) , so we include it here to see if the factored structure that we explore in this section learns similar patterns.", |
| "cite_spans": [ |
| { |
| "start": 145, |
| "end": 168, |
| "text": "(Paul and Dredze, 2012)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Structure Comparison", |
| "sec_num": "7.3" |
| }, |
| { |
| "text": "Based on our sparsity experiments in the previous subsection, we set \u03c4 t = 1.003 t to induce hard structures (tree and factored) and \u03c4 = 0 to induce a DAG. We keep the same parameters as the previous subsection: K=50 and C=10 for Debates and K=20 and C=5 for Reviews. For the factored structures, we use two factors, with one factor having more components than the other: 3 and 7 components for Debates, and 2 and 3 components for Reviews (the total number of components across the two factors is therefore the same as for the DAG and tree experiments). The Abstracts experiments use the same parameters as with Debates.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Structure Comparison", |
| "sec_num": "7.3" |
| }, |
| { |
| "text": "Since the Abstracts dataset does not have a perspective value to predict, we do not include prediction error as a metric, instead focusing on held-out perplexity and topic coherence (Eq. 2). Table 4 shows the results of these two metrics.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 191, |
| "end": 198, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Structure Comparison", |
| "sec_num": "7.3" |
| }, |
| { |
| "text": "Some trends are clear and consistent. Topic components always hurt perplexity, while these components typically improve coherence, as was observed in the previous subsection. It has previously been observed that perplexity and topic quality are not correlated (Chang et al., 2009) . These results show that the choice of components depends on the task at hand. Combining the two components tends to produce results somewhere in between, suggesting that using both component types is a reasonable \"default\" setting. Document components usually improve perplexity, likely due to the nature of the document completion setup, in which half of each document is held out. The document components capture correlations between topics, so by inferring the components that generated the first half of the document, the prior is adjusted to give more probability to topics that are likely to occur in the unseen second half. Another interesting trend is that the factored structure tends to perform well under both metrics, with the lowest perplexity and highest coherence in a majority of the nine comparisons (i.e. each row). Perhaps the models are capturing a natural factorization present in the data.", |
| "cite_spans": [ |
| { |
| "start": 260, |
| "end": 280, |
| "text": "(Chang et al., 2009)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Structure Comparison", |
| "sec_num": "7.3" |
| }, |
| { |
| "text": "To understand the factored structure qualitatively, Figure 6 shows examples of components from each factor along with example topics that draw from all pairs of these components, learned on Abstracts. We find that the factor with the smaller number of components (left of the figure) seems to decompose into components representing the major themes or disciplines found in ACL abstracts, with one component expressing computational approaches (top) and the other expressing linguistic theory (bottom). The third component (not shown) has words associated with speech, including {spoken, speech, recognition}.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 52, |
| "end": 60, |
| "text": "Figure 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Structure Comparison", |
| "sec_num": "7.3" |
| }, |
| { |
| "text": "The factor shown on the right seems to decompose into different research topics: one component represents semantics (top), another syntax (bottom), with others including morphology (top words including {segmentation, chinese, morphology}) and information retrieval (top words including {documents, retrieval, ir}).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Structure Comparison", |
| "sec_num": "7.3" |
| }, |
| { |
| "text": "Many of the topics intuitively follow from the components of these two factors. For example, the two topics expressing vector space models and distributional semantics (top left and right) both draw from the \"computational\" and \"semantics\" components, while the topics expressing ontologies and question answering (middle left and right) draw from \"linguistics\" and \"semantics\".", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Structure Comparison", |
| "sec_num": "7.3" |
| }, |
| { |
| "text": "The factorization is similar to what had previously been induced by FLDA. Figure 3 of Paul and Dredze (2012) shows components that look similar to the computational methods and linguistic theory components here, and the factor with the largest number of components also decomposes by research topic. These results show that SPRITE is capable of recovering similar structures as FLDA, a more specialized model. SPRITE is also much more flexible than FLDA. While FLDA strictly models a one-to-one mapping of topics to each pair of components, SPRITE allows multiple topics to belong to the same pair (as in the semantics examples above), and conversely SPRITE does not require that all pairs have an associated topic. This property allows SPRITE to scale to larger numbers of factors than FLDA, because the number of topics is not required to grow with the number of all possible tuples.", |
| "cite_spans": [ |
| { |
| "start": 86, |
| "end": 108, |
| "text": "Paul and Dredze (2012)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 74, |
| "end": 82, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Structure Comparison", |
| "sec_num": "7.3" |
| }, |
| { |
| "text": "Our topic and perspective model is related to supervised hierarchical LDA (SHLDA) (Nguyen et al., 2013) , which learns a topic hierarchy while also learning regression parameters to associate topics with feature values such as political perspective. This model does not explicitly incorporate perspective-specific word priors into the topics (as in our factorized approach). The regression structure is also different. SHLDA is a \"downstream\" model, where the perspective value is a response variable conditioned on the topics. In contrast, SPRITE is an \"upstream\" model, where the topics are conditioned on the perspective value. We argue that the latter is more accurate as a generative story (the emitted words depend on the author's perspective, not the other way around). Moreover, in our model the perspective influences both the word and topic distributions (through the topic and document components, respectively).", |
| "cite_spans": [ |
| { |
| "start": 82, |
| "end": 103, |
| "text": "(Nguyen et al., 2013)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "8" |
| }, |
| { |
| "text": "Inverse regression topic models (Rabinovich and Blei, 2014) use document feature values (such as political ideology) to alter the parameters of the Figure 6 : Examples of topics (gray boxes) and components (colored boxes) learned on the Abstracts corpus with 50 topics using a factored structure. The components have been grouped into two factors, one factor with 3 components (left) and one with 7 (right), with two examples shown from each. Each topic prior draws from exactly one component from each factor. topic-specific word distributions. This is an alternative to the more common approach to regression based topic modeling, where the variables affect the topic distributions rather than the word distributions. Our SPRITE-based model does both: the document features adjust the prior over topic distributions (through \u03b4), but by tying together the document and topic components (with \u03b2), the document features also affect the prior over word distributions. To the best of our knowledge, this is the first topic model to condition both topic and word distributions on the same features. The topic aspect model (Paul and Girju, 2010a ) is also a two-dimensional factored model that has been used to jointly model topic and perspective (Paul and Girju, 2010b) . However, this model does not use structured priors over the parameters, unlike most of the models discussed in \u00a74.", |
| "cite_spans": [ |
| { |
| "start": 32, |
| "end": 59, |
| "text": "(Rabinovich and Blei, 2014)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 1118, |
| "end": 1140, |
| "text": "(Paul and Girju, 2010a", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 1242, |
| "end": 1265, |
| "text": "(Paul and Girju, 2010b)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 148, |
| "end": 156, |
| "text": "Figure 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "8" |
| }, |
| { |
| "text": "An alternative approach to incorporating user preferences and expertise are interactive topic models (Hu et al., 2013) , a complementary approach to SPRITE.", |
| "cite_spans": [ |
| { |
| "start": 101, |
| "end": 118, |
| "text": "(Hu et al., 2013)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "8" |
| }, |
| { |
| "text": "We have presented SPRITE, a family of topic models that utilize structured priors to induce preferred topic structures. Specific instantiations of SPRITE are similar or equivalent to several existing topic models. We demonstrated the utility of SPRITE by constructing a single model with many different characteristics, including a topic hierarchy, a factorization of topic and perspective, and supervision in the form of document attributes. These structures were incorporated into the priors of both the word and topic distributions, unlike most prior work that considered one or the other. Our experiments explored how each of these various model features affect performance, and our results showed that models with structured priors perform better than baseline LDA models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion and Conclusion", |
| "sec_num": "9" |
| }, |
| { |
| "text": "Our framework has made clear advancements with respect to existing structured topic models. For example, SPRITE is more general and offers simpler inference than the shared components topic model (Gormley et al., 2010) , and SPRITE allows for more flexible and scalable factored structures than FLDA, as described in earlier sections. Both of these models were motivated by their ability to learn interesting structures, rather than their performance at any predictive task. Similarly, our goal in this study was not to provide state of the art results for a particular task, but to demonstrate a framework for learning structures that are richer than previous structured models. Therefore, our experiments focused on understanding how SPRITE compares to commonly used models with similar structures, and how the different variants compare under different metrics. Ultimately, the model design choice depends on the application and the user needs. By unifying such a wide variety of topic models, SPRITE can serve as a common framework for enabling model exploration and bringing application-specific preferences and structure into topic models.", |
| "cite_spans": [ |
| { |
| "start": 196, |
| "end": 218, |
| "text": "(Gormley et al., 2010)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion and Conclusion", |
| "sec_num": "9" |
| }, |
| { |
| "text": "Transactions of the Association for Computational Linguistics, vol. 3, pp. 43-57, 2015. Action Editor: Janyce Wiebe. Submission batch: 7/2014; Revision batch 12/2014; Published 1/2015. c 2015 Association for Computational Linguistics.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "This generalizes the technique used in Paul and Dredze (2012), who approximated binary variables with real-valued variables in (0, 1), by using a \"U-shaped\" Beta(\u03c1 < 1) distri-", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "The posterior becomes concentrated around the prior when the Dirichlet variance is low, in which case SPRITE behaves like SCTM. SPRITE is therefore more general.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Other modifications could be made to the objective function to induce sparsity, such as entropy regularization (Balasubramanyan and Cohen, 2013).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "AdaGrad decayed too quickly for the b variables. For these, we used a variant suggested by Zeiler (2012) which uses an average of historical gradients rather than a sum.5 Our code and the data will be available at: http://cs.jhu.edu/\u02dcmpaul.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We thank Jason Eisner and Hanna Wallach for helpful discussions, and Viet-An Nguyen for providing the Congressional debates data. Michael Paul is supported by a Microsoft Research PhD fellowship.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| }, |
| { |
| "text": "Perplexity Prediction error Coherence Perplexity Prediction error Coherence Full model \u2020 1555.5 \u00b1 2.3 \u2020 0.615 \u00b1 0.001 -342.8 \u00b1 0.9 \u2020 1421.3 \u00b1 8.4 \u2020 0.787 \u00b1 0.006 -512.7 \u00b1 1.6 Hierarchy only \u2020 1561.8 \u00b1 1.4 0.620 \u00b1 0.002 -342.6 \u00b1 1.1 \u2020 1457.2 \u00b1 6.9 \u2020 0.804 \u00b1 0.007 -509.1 \u00b1 1.9 Perspective only \u2020 1567.3 \u00b1 2.3 \u2020 0.613 \u00b1 0.002 -342.1 \u00b1 1.2 \u2020 1413.7 \u00b1 2.2 \u2020 0.800 \u00b1 0.002 -512.0 \u00b1 1.7 SCTM-style 1572.5 \u00b1 1.6 0.620 \u00b1 0.002 \u2020 -335.8 \u00b1 1.1 1504.0 \u00b1 1.9 \u2020 0.837 \u00b1 0.002 \u2020 -490.8 \u00b1 0.9 PAM-style \u2020 1567.4 \u00b1 1.9 0.620 \u00b1 0.002 -347.6 \u00b1 1.4 \u2020 1440.4 \u00b1 2.7 \u2020 0.835 \u00b1 0.004 -542.9 \u00b1 6.7 FLDA-style \u2020 1559. 1579.6 \u00b1 1.5 0.620 \u00b1 0.001 -342.6 \u00b1 0.6 1507.9 \u00b1 2.4 0.846 \u00b1 0.002 -501.4 \u00b1 1.2 LDA fixed1659.3 \u00b1 0.9 0.622 \u00b1 0.002 -349.5 \u00b1 0.8 1517.2 \u00b1 0.4 0.920 \u00b1 0.003 -585.2 \u00b1 0.9 Bag of words 2521.6 \u00b1 0.0 0.617 \u00b1 0.000 \u2020 -196.2 \u00b1 0.0 1633.5 \u00b1 0.0 0.813 \u00b1 0.000 \u2020 -408.1 \u00b1 0.0 Naive baseline 7426.0 \u00b1 0.0 0.677 \u00b1 0.000 -852.9 \u00b1 7.4 10158.0 \u00b1 0.0 1.595 \u00b1 0.000 -795.2 \u00b1 13.0 1572.0 \u00b1 0.9 1568.7 \u00b1 2.0 1566.8 \u00b1 2.0 -342.9 \u00b1 1.2 -346.0 \u00b1 0.9 -343.2 \u00b1 1.0 Topic 1575.0 \u00b1 1.5 1573.4 \u00b1 1.8 1559.3 \u00b1 1.5 -342.4 \u00b1 0.6 -339.2 \u00b1 1.7 -333.9 \u00b1 0.9 Combined 1566.7 \u00b1 1.7 1559.9 \u00b1 1.9 1552.5 \u00b1 1.9 -342.9 \u00b1 1.3 -342.6 \u00b1 1.2 -340.3 \u00b1 1.0 Reviews Document 1456.9 \u00b1 3.8 1446.4 \u00b1 4.0 1450.4 \u00b1 5.5 -512.2 \u00b1 4.6 -527.9 \u00b1 6.5 -535.4 \u00b1 7.4 Topic 1508.5 \u00b1 1.7 1517.9 \u00b1 2.0 1502.0 \u00b1 1.9 -500.1 \u00b1 1.2 -499.0 \u00b1 0.9 -486.1 \u00b1 1.5 Combined 1464.1 \u00b1 3.3 1455.1 \u00b1 5.6 1448.5 \u00b1 8.5 -504.9 \u00b1 1.4 
-527.8 \u00b1 6.1 -535.5 \u00b1 8.2 Abstracts Document 3107.7 \u00b1 7.7 3089.5 \u00b1 9.1 3098.7 \u00b1 10.2 -393.2 \u00b1 0.8 -390.8 \u00b1 0.9 -392.8 \u00b1 1.5 Topic 3241.7 \u00b1 2.1 3455.9 \u00b1 10.2 3507.4 \u00b1 9.7 -389.0 \u00b1 0.8 -388.8 \u00b1 0.7 -332.2 \u00b1 1.1 Combined 3200.8 \u00b1 3.5 3307.2 \u00b1 7.8 3364.9 \u00b1 19.1 -373.1 \u00b1 0.8 -360.6 \u00b1 0.9 -342.3 \u00b1 0.9 italian! structure! spanish! \"Computational\"! \"Semantics\"! \"Syntax\"!", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Incorporating domain knowledge into topic modeling via Dirichlet forest priors", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Andrzejewski", |
| "suffix": "" |
| }, |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Craven", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "ICML", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "D. Andrzejewski, X. Zhu, and M. Craven. 2009. In- corporating domain knowledge into topic modeling via Dirichlet forest priors. In ICML.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Regularization of latent variable models to obtain sparsity", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Balasubramanyan", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Cohen", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "SIAM Conference on Data Mining", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "R. Balasubramanyan and W. Cohen. 2013. Regular- ization of latent variable models to obtain sparsity. In SIAM Conference on Data Mining.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "A correlated topic model of Science", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Blei", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Lafferty", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Annals of Applied Statistics", |
| "volume": "1", |
| "issue": "1", |
| "pages": "17--35", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "D. Blei and J. Lafferty. 2007. A correlated topic model of Science. Annals of Applied Statistics, 1(1):17-35.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Hierarchical topic models and the nested Chinese restaurant process", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Blei", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Griffiths", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Jordan", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Tenenbaum", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "NIPS", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "D. Blei, T. Griffiths, M. Jordan, and J. Tenenbaum. 2003a. Hierarchical topic models and the nested Chinese restaurant process. In NIPS.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Reading tea leaves: How humans interpret topic models", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Boyd-Graber", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Gerrish", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Blei", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "NIPS", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J. Chang, J. Boyd-Graber, S. Gerrish, C. Wang, and D. Blei. 2009. Reading tea leaves: How humans interpret topic models. In NIPS.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Adaptive subgradient methods for online learning and stochastic optimization", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Duchi", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Hazan", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Singer", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "JMLR", |
| "volume": "12", |
| "issue": "", |
| "pages": "2121--2159", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J. Duchi, E. Hazan, and Y. Singer. 2011. Adaptive sub- gradient methods for online learning and stochastic optimization. JMLR, 12:2121-2159.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Sparse additive generative models of text", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Eisenstein", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Ahmed", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [ |
| "P" |
| ], |
| "last": "Xing", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "ICML", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J. Eisenstein, A. Ahmed, and E. P. Xing. 2011. Sparse additive generative models of text. In ICML.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Shared components topic models", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [ |
| "R" |
| ], |
| "last": "Gormley", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Dredze", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Van Durme", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Eisner", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "M.R. Gormley, M. Dredze, B. Van Durme, and J. Eis- ner. 2010. Shared components topic models. In NAACL.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Finding scientific topics", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Griffiths", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Steyvers", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the National Academy of Sciences of the United States of America", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "T. Griffiths and M. Steyvers. 2004. Finding scientific topics. In Proceedings of the National Academy of Sciences of the United States of America.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Interactive topic modeling", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Boyd-Graber", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Satinoff", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Machine Learning", |
| "volume": "95", |
| "issue": "", |
| "pages": "423--469", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Y. Hu, J. Boyd-Graber, B. Satinoff, and A. Smith. 2013. Interactive topic modeling. Machine Learn- ing, 95:423-469.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Exponentiated gradient versus gradient descent for linear predictors", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Kivinen", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "K" |
| ], |
| "last": "Warmuth", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Information and Computation", |
| "volume": "132", |
| "issue": "", |
| "pages": "1--63", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J. Kivinen and M.K. Warmuth. 1997. Exponentiated gradient versus gradient descent for linear predic- tors. Information and Computation, 132:1-63.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Measuring bias and uncertainty in ideal point estimates via the parametric bootstrap", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [ |
| "B" |
| ], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [ |
| "T" |
| ], |
| "last": "Poole", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Political Analysis", |
| "volume": "12", |
| "issue": "2", |
| "pages": "105--127", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J.B. Lewis and K.T. Poole. 2004. Measuring bias and uncertainty in ideal point estimates via the paramet- ric bootstrap. Political Analysis, 12(2):105-127.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Pachinko allocation: DAG-structured mixture models of topic correlations", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Mccallum", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "International Conference on Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "W. Li and A. McCallum. 2006. Pachinko alloca- tion: DAG-structured mixture models of topic cor- relations. In International Conference on Machine Learning.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Topic models conditioned on arbitrary features with Dirichletmultinomial regression", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Mimno", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Mccallum", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "UAI", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "D. Mimno and A. McCallum. 2008. Topic mod- els conditioned on arbitrary features with Dirichlet- multinomial regression. In UAI.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Mixtures of hierarchical topics with Pachinko allocation", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Mimno", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Mccallum", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "International Conference on Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "D. Mimno, W. Li, and A. McCallum. 2007. Mixtures of hierarchical topics with Pachinko allocation. In International Conference on Machine Learning.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Optimizing semantic coherence in topic models", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Mimno", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [ |
| "M" |
| ], |
| "last": "Wallach", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Talley", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Leenders", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "McCallum", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "D. Mimno, H.M. Wallach, E. Talley, M. Leenders, and A. McCallum. 2011. Optimizing semantic coher- ence in topic models. In EMNLP.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Lexical and hierarchical topic regression", |
| "authors": [ |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Nguyen", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Boyd-Graber", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Resnik", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "V. Nguyen, J. Boyd-Graber, and P. Resnik. 2013. Lex- ical and hierarchical topic regression. In Neural In- formation Processing Systems.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Factorial LDA: Sparse multi-dimensional text models", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [ |
| "J" |
| ], |
| "last": "Paul", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Dredze", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Neural Information Processing Systems (NIPS)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "M.J. Paul and M. Dredze. 2012. Factorial LDA: Sparse multi-dimensional text models. In Neural Informa- tion Processing Systems (NIPS).", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Drug extraction from the web: Summarizing drug experiences with multidimensional topic models", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [ |
| "J" |
| ], |
| "last": "Paul", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Dredze", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "M.J. Paul and M. Dredze. 2013. Drug extraction from the web: Summarizing drug experiences with multi- dimensional topic models. In NAACL.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "A two-dimensional topic-aspect model for discovering multi-faceted topics", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Paul", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Girju", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "AAAI", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "M. Paul and R. Girju. 2010a. A two-dimensional topic-aspect model for discovering multi-faceted topics. In AAAI.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Summarizing contrastive viewpoints in opinionated text", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [ |
| "J" |
| ], |
| "last": "Paul", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Girju", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "M.J. Paul and R. Girju. 2010b. Summarizing con- trastive viewpoints in opinionated text. In Empirical Methods in Natural Language Processing.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "What affects patient (dis)satisfaction? Analyzing online doctor ratings with a joint topic-sentiment model", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [ |
| "J" |
| ], |
| "last": "Paul", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [ |
| "C" |
| ], |
| "last": "Wallace", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Dredze", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "AAAI Workshop on Expanding the Boundaries of Health Informatics Using AI", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "M.J. Paul, B.C. Wallace, and M. Dredze. 2013. What affects patient (dis)satisfaction? Analyzing online doctor ratings with a joint topic-sentiment model. In AAAI Workshop on Expanding the Boundaries of Health Informatics Using AI.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "The inverse regression topic model", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Rabinovich", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Blei", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "International Conference on Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "M. Rabinovich and D. Blei. 2014. The inverse regres- sion topic model. In International Conference on Machine Learning.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Labeled LDA: a supervised topic model for credit attribution in multi-labeled corpora", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Ramage", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Hall", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Nallapati", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "D. Ramage, D. Hall, R. Nallapati, and C.D. Man- ning. 2009. Labeled LDA: a supervised topic model for credit attribution in multi-labeled corpora. In EMNLP.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Annealing structural bias in multilingual weighted grammar induction", |
| "authors": [ |
| { |
| "first": "N", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Eisner", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "COLING-ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "N.A. Smith and J. Eisner. 2006. Annealing structural bias in multilingual weighted grammar induction. In COLING-ACL.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Database of NIH grants using machine-learned categories and graphical clustering", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [ |
| "M" |
| ], |
| "last": "Talley", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Newman", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Mimno", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [ |
| "W" |
| ], |
| "last": "Herr", |
| "suffix": "" |
| }, |
| { |
| "first": "I", |
| "middle": [ |
| "I" |
| ], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [ |
| "M" |
| ], |
| "last": "Wallach", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [ |
| "A P C" |
| ], |
| "last": "Burns", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Leenders", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "McCallum", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Nature Methods", |
| "volume": "8", |
| "issue": "6", |
| "pages": "443--444", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "E.M. Talley, D. Newman, D. Mimno, B.W. Herr II, H.M. Wallach, G.A.P.C. Burns, M. Leenders, and A. McCallum. 2011. Database of NIH grants us- ing machine-learned categories and graphical clus- tering. Nature Methods, 8(6):443-444.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Deterministic annealing EM algorithm", |
| "authors": [ |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Ueda", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Nakano", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "Neural Networks", |
| "volume": "11", |
| "issue": "2", |
| "pages": "271--282", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "N. Ueda and R. Nakano. 1998. Deterministic anneal- ing EM algorithm. Neural Networks, 11(2):271- 282.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "A large-scale quantitative analysis of latent factors and sentiment in online doctor reviews", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [ |
| "C" |
| ], |
| "last": "Wallace", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "J" |
| ], |
| "last": "Paul", |
| "suffix": "" |
| }, |
| { |
| "first": "U", |
| "middle": [], |
| "last": "Sarkar", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [ |
| "A" |
| ], |
| "last": "Trikalinos", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Dredze", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Journal of the American Medical Informatics Association", |
| "volume": "21", |
| "issue": "6", |
| "pages": "1098--1103", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "B.C. Wallace, M.J. Paul, U. Sarkar, T.A. Trikalinos, and M. Dredze. 2014. A large-scale quantitative analysis of latent factors and sentiment in online doctor reviews. Journal of the American Medical Informatics Association, 21(6):1098-1103.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Rethinking LDA: Why priors matter", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [ |
| "M" |
| ], |
| "last": "Wallach", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Mimno", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "McCallum", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "NIPS", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "H.M. Wallach, D. Mimno, and A. McCallum. 2009a. Rethinking LDA: Why priors matter. In NIPS.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Evaluation methods for topic models", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [ |
| "M" |
| ], |
| "last": "Wallach", |
| "suffix": "" |
| }, |
| { |
| "first": "I", |
| "middle": [], |
| "last": "Murray", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Mimno", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "ICML", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "H.M. Wallach, I. Murray, R. Salakhutdinov, and D. Mimno. 2009b. Evaluation methods for topic models. In ICML.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Decoupling sparsity and smoothness in the discrete hierarchical Dirichlet process", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Blei", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "NIPS", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "C. Wang and D. Blei. 2009. Decoupling sparsity and smoothness in the discrete hierarchical Dirich- let process. In NIPS.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "ADADELTA: An adaptive learning rate method", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [ |
| "D" |
| ], |
| "last": "Zeiler", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "M.D. Zeiler. 2012. ADADELTA: An adaptive learning rate method. CoRR, abs/1212.5701.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "text": "Figure 2: Example graph structures describing possible relations between components (middle row) and topics or documents (bottom row). Edges correspond to non-zero values for \u03b1 or \u03b2 (the component coefficients defining priors over the document and topic distributions). The root node is a shared prior over the component weights (with other possibilities discussed in \u00a73.3).", |
| "uris": null, |
| "num": null |
| }, |
| "TABREF3": { |
| "content": "<table><tr><td>: The percentage of indicator values that are sparse (near 0 or 1) when using different annealing schedules.</td></tr></table>", |
| "html": null, |
| "type_str": "table", |
| "num": null, |
| "text": "" |
| } |
| } |
| } |
| } |