| { |
| "paper_id": "Q16-1004", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:06:24.682698Z" |
| }, |
| "title": "Detecting Cross-Cultural Differences Using a Multilingual Topic Model", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [ |
| "D" |
| ], |
| "last": "Guti\u00e9rrez", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of California", |
| "location": { |
| "addrLine": "San Diego" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Ekaterina", |
| "middle": [], |
| "last": "Shutova", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Computer Laboratory", |
| "institution": "University of Cambridge", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Patricia", |
| "middle": [], |
| "last": "Lichtenstein", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of California", |
| "location": { |
| "settlement": "Merced" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Gerard", |
| "middle": [], |
| "last": "De Melo", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Tsinghua University", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Luca", |
| "middle": [], |
| "last": "Gilardi", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "ICSI", |
| "location": { |
| "settlement": "Berkeley" |
| } |
| }, |
| "email": "lucag@icsi.berkeley.edu" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Understanding cross-cultural differences has important implications for world affairs and many aspects of the life of society. Yet, the majority of text-mining methods to date focus on the analysis of monolingual texts. In contrast, we present a statistical model that simultaneously learns a set of common topics from multilingual, non-parallel data and automatically discovers the differences in perspectives on these topics across linguistic communities. We perform a behavioural evaluation of a subset of the differences identified by our model in English and Spanish to investigate their psychological validity.", |
| "pdf_parse": { |
| "paper_id": "Q16-1004", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Understanding cross-cultural differences has important implications for world affairs and many aspects of the life of society. Yet, the majority of text-mining methods to date focus on the analysis of monolingual texts. In contrast, we present a statistical model that simultaneously learns a set of common topics from multilingual, non-parallel data and automatically discovers the differences in perspectives on these topics across linguistic communities. We perform a behavioural evaluation of a subset of the differences identified by our model in English and Spanish to investigate their psychological validity.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Recent years have seen a growing interest in textmining applications aimed at uncovering public opinions and social trends (Fader et al., 2007; Monroe et al., 2008; Gerrish and Blei, 2011; Pennacchiotti and Popescu, 2011) . They rest on the assumption that the language we use is indicative of our underlying worldviews. Research in cognitive and sociolinguistics suggests that linguistic variation across communities systematically reflects differences in their cultural and moral models and goes beyond lexicon and grammar (K\u00f6vecses, 2004; Lakoff and Wehling, 2012) . Cross-cultural differences manifest themselves in text in a multitude of ways, most prominently through the use of explicit opinion vocabulary with respect to a certain topic (e.g. \"policies that benefit the poor\"), idiomatic and metaphorical language (e.g. \"the company is spinning its wheels\") and other types of figurative language, such as irony or sarcasm.", |
| "cite_spans": [ |
| { |
| "start": 123, |
| "end": 143, |
| "text": "(Fader et al., 2007;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 144, |
| "end": 164, |
| "text": "Monroe et al., 2008;", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 165, |
| "end": 188, |
| "text": "Gerrish and Blei, 2011;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 189, |
| "end": 221, |
| "text": "Pennacchiotti and Popescu, 2011)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 525, |
| "end": 541, |
| "text": "(K\u00f6vecses, 2004;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 542, |
| "end": 567, |
| "text": "Lakoff and Wehling, 2012)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The connection between language, culture and reasoning remains one of the central research questions in psychology.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Thibodeau and Boroditsky (2011) investigated how metaphors affect our decision-making. They presented two groups of human subjects with two different texts about crime. In the first text, crime was metaphorically portrayed as a virus and in the second as a beast. The two groups were then asked a set of questions on how to tackle crime in the city. As a result, while the first group tended to opt for preventive measures (e.g. stronger social policies), the second group converged on punishment-or restraint-oriented measures. According to Thibodeau and Boroditsky, their results demonstrate that metaphors have profound influence on how we conceptualize and act with respect to societal issues. This suggests that in order to gain a full understanding of social trends across populations, one needs to identify subtle but systematic linguistic differences that stem from the groups' cultural backgrounds, expressed both literally and figuratively. Performing such an analysis by hand is labor-intensive and often impractical, particularly in a multilingual setting where expertise in all of the languages of interest may be rare.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "With the rise of blogging and social media, NLP techniques have been successfully used for a number of tasks in political science, including automatically estimating the influence of particular politicians in the US senate (Fader et al., 2007) , identifying lexical features that differentiate political rhetoric of opposing parties (Monroe et al., 2008) , predicting voting patterns of politicians based on their use of language (Gerrish and Blei, 2011) , and predicting political affiliation of Twitter users (Pennacchiotti and Popescu, 2011) . Fang et al. (2012) addressed the problem of automatically detecting and visualising the contrasting perspectives on a set of topics attested in multiple distinct corpora. While successful in their tasks, all of these approaches focused on monolingual data and did not reach beyond literal language. In contrast, we present a method that detects fine-grained cross-cultural differences from multilingual data, where such differences abound, expressed both literally and figuratively. Our method brings together opinion mining and cross-lingual topic modelling techniques for this purpose. Previous approaches to cross-lingual topic modelling (Boyd-Graber and Blei, 2009; Jagarlamudi and Daum\u00e9 III, 2010) addressed the problem of mining common topics from multilingual corpora. We present a model that learns such common topics, while simultaneously identifying lexical features that are indicative of the underlying differences in perspectives on these topics by speakers of English, Spanish and Russian. These differences are mined from multilingual, non-parallel datasets of Twitter and news data. In contrast to previous work, our model does not merely output a list of monolingual lexical features for manual comparison, but also automatically infers multilingual contrasts.", |
| "cite_spans": [ |
| { |
| "start": 223, |
| "end": 243, |
| "text": "(Fader et al., 2007)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 333, |
| "end": 354, |
| "text": "(Monroe et al., 2008)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 443, |
| "end": 454, |
| "text": "Blei, 2011)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 511, |
| "end": 544, |
| "text": "(Pennacchiotti and Popescu, 2011)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 547, |
| "end": 565, |
| "text": "Fang et al. (2012)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 1188, |
| "end": 1216, |
| "text": "(Boyd-Graber and Blei, 2009;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 1217, |
| "end": 1249, |
| "text": "Jagarlamudi and Daum\u00e9 III, 2010)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Our system (1) uses word-document co-occurrence data as input, where the words are labeled as topic words or perspective words; (2) finds the highest-likelihood dictionary between topic words in the two languages given the co-occurrence data;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "(3) finds cross-lingual topics specified by distributions over topic-words and perspective-words; and (4) automatically detects differences in perspectiveword distributions in the two languages. We perform a behavioural evaluation of a subset of the differences identified by the model and demonstrate their psychological validity. Our data and dictionaries are available from the first author upon request.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "View detection. Identifying different viewpoints is related to the well-studied area of subjectivity detection, which aims at exposing opinion, evaluation, and speculation in text (Wiebe et al., 2004) and attributing it to specific people (Awadallah et al., 2011; Abu-Jbara et al., 2012) . In our work, we are less interested in explicit local forms of subjectivity, instead aiming at detecting more general contrasts across linguistic communities.", |
| "cite_spans": [ |
| { |
| "start": 180, |
| "end": 200, |
| "text": "(Wiebe et al., 2004)", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 239, |
| "end": 263, |
| "text": "(Awadallah et al., 2011;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 264, |
| "end": 287, |
| "text": "Abu-Jbara et al., 2012)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Another line of research has focused on inferring author attributes such as gender, age (Garera and Yarowsky, 2009) , location (Jones et al., 2007) , or political affiliation (Pennacchiotti and Popescu, 2011) . Such studies make use of syntactic style, discourse characteristics, as well as lexical choice. The models used for this are typically binary classifiers trained in a fully supervised fashion. In contrast, in our task, we automatically infer the topic distributions and find topic-specific contrasts.", |
| "cite_spans": [ |
| { |
| "start": 88, |
| "end": 115, |
| "text": "(Garera and Yarowsky, 2009)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 127, |
| "end": 147, |
| "text": "(Jones et al., 2007)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 175, |
| "end": 208, |
| "text": "(Pennacchiotti and Popescu, 2011)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Probabilistic topic models. Probabilistic topic models have proven useful for a variety of semantic tasks, such as selectional-preference induction (\u00d3 S\u00e9aghdha, 2010; Ritter et al., 2010) , sentiment analysis (Boyd-Graber and Resnik, 2010) and studying the evolution of concepts and ideas (Hall et al., 2008) . The goal of a topic model is to characterize observed data in terms of a much smaller set of unobserved, semantically coherent topics. A particularly popular probabilistic topic model is Latent Dirichlet Allocation (LDA) (Blei et al., 2003) . Under its assumptions, each document has a unique mix of topics, and each topic is a distribution over terms in the vocabulary. A topic is chosen for every word token according to the topic mix of the document to which it belongs, and then the word's identity is drawn from the corresponding topic's distribution.", |
| "cite_spans": [ |
| { |
| "start": 148, |
| "end": 166, |
| "text": "(\u00d3 S\u00e9aghdha, 2010;", |
| "ref_id": null |
| }, |
| { |
| "start": 167, |
| "end": 187, |
| "text": "Ritter et al., 2010)", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 209, |
| "end": 239, |
| "text": "(Boyd-Graber and Resnik, 2010)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 289, |
| "end": 308, |
| "text": "(Hall et al., 2008)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 532, |
| "end": 551, |
| "text": "(Blei et al., 2003)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Handling multilingual corpora. LDA is designed for monolingual text and thus it lacks the structure necessary to model cross-lingually valid topics. While topic models can be trained individually on two languages and then the acquired topics can be matched, the correspondences between the topics for the two terms will be highly unstable. To address this, Boyd-Graber and Blei (2009) (MUTO) and Jagarlamudi and Daum\u00e9 III (2010) (JOINTLDA) introduced the notion of crosslingually valid concepts associated with different terms in different languages, using bilingual dictionaries to model topics across languages. Based on a model by Haghighi et al. (2008) , MUTO is capable of learning translations-i.e., matching between terms in the different languages being compared. The Polylingual Topic Model of Mimno et al. (2009) is another approach to finding topics in multilingual corpora, but it requires tuples composed of compa-rable documents in each language of the corpus.", |
| "cite_spans": [ |
| { |
| "start": 634, |
| "end": 656, |
| "text": "Haghighi et al. (2008)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 803, |
| "end": 822, |
| "text": "Mimno et al. (2009)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Topic models for view detection. LDA also assumes that the distribution of each topic is fixed across all documents in a corpus. Therefore, a topic associated with, e.g., war will have the same distribution over the lexicon regardless of whether the document was taken from a pro-war editorial or an anti-war speech. However, in reality we may expect a single topic to exhibit systematic and predictable variations in its distribution based on authorship.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The cross-collection LDA model by Paul and Girju (2009) addresses this by specifically aiming to expose viewpoint differences across different document collections. Ahmed and Xing (2010) proposed a similar model for detecting ideological differences. Fang et al. (2012) 's Cross-Perspective Topic (CPT) model breaks up the terms in the vocabulary into topic terms and perspective terms with different generative processes, and differentiates between different collections of documents within the corpus. The topic terms are assumed to be generated as in LDA. However, the distribution of perspective terms in a document is taken to be dependent on both the topic mixture of the document as well as the collection from which the document is drawn.", |
| "cite_spans": [ |
| { |
| "start": 34, |
| "end": 55, |
| "text": "Paul and Girju (2009)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 165, |
| "end": 186, |
| "text": "Ahmed and Xing (2010)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 251, |
| "end": 269, |
| "text": "Fang et al. (2012)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Recent works proposed models for specific types of data. use user identities and interactions in threaded discussions, while Gottipati et al. (2013) developed a topic model for Debatepedia, a semi-structured resource in which arguments are explicitly enumerated. However, all of these models perform their analyses on monolingual datasets. Thus, they are useful for comparing different ideologies expressed in the same language, but not for cross-linguistic comparisons.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The goal of our model is to analyse large, nonparallel, multilingual corpora and present crosslingually valid topics and the associated perspectives, automatically inferring the differences in conceptualization of these topics across cultures. Following Boyd-Graber and Blei (2009) and Jagarlamudi and Daum\u00e9 III (2010), our distributions of latent topics range over latent, cross-lingual topic concepts that manifest themselves as language-specific topic words. We use bilingual dictionaries, contain- ing words in one language and their translations in another language, to represent the topic concepts. These are represented as a bipartite graph, with each translation entry being an edge and each topic word in the two languages being a vertex. While the topic words are tied together by the translation dictionary, the perspective words can vary freely across languages. Following Fang et al. 2012, we treat nouns as topic words and verbs and adjectives as perspective words 1 . The model assumes that adjective and verb tokens in each document are assigned to topics in proportion to the topic assignments of the topic word tokens. Then, the perspective term for this topic is drawn depending on the topic assignment and the language of the speaker.", |
| "cite_spans": [ |
| { |
| "start": 254, |
| "end": 281, |
| "text": "Boyd-Graber and Blei (2009)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Given the languages \u2208 {a, b}, our model infers the distributions of multi-lingual topics and languagespecific perspective-words ( Fig. 2 ), as follows:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 130, |
| "end": 136, |
| "text": "Fig. 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Basic Generative Model", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "1. Draw a set C of concepts (u, v) matching topic word u from language a to topic word v from language b, where the probability of concept (u, v) is proportional to a prior \u03c0 u,v (e.g. based on information from a translation dictionary).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Basic Generative Model", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "\u2022 For topic indices k \u2208 {1, ..., K}, draw language-independent topic-concept distributions \u03c6 w k \u223c Dir(\u03b2 w ) over pairs (w a , w b ) \u2208 C. \u2022 For topic indices k \u2208 {1, ..., K} and languages \u2208 {a, b}, draw language-specific perspective-term distributions \u03c6 ,o k \u223c Dir(\u03b2 o ) over perspective-terms in language .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Draw multinomial distributions:", |
| "sec_num": "2." |
| }, |
| { |
| "text": "d \u2208 {1, ..., D} with lang. d : \u2022 Draw topic weights \u03b8 d \u223c Dir(\u03b1) \u2022 For each topic-word index i \u2208 {1, ..., N w d } of document d: -Draw topic z i \u223c \u03b8 d -Draw topic concept c i = (w a , w b ) \u223c \u03c6 w z i , and select w d as the member of that pair corresponding to language d . \u2022 For each perspective-word index j \u2208 {1, ..., N o d } of document d: -Draw topic x j \u223c Uniform(z w 1 , ..., z w N o d ) -Draw perspective-word o j \u223c \u03c6 ,o x j", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "For each document", |
| "sec_num": "3." |
| }, |
| { |
| "text": "We have experimented with several variants of our model, in order to account for the translation of polysemous words, adapt the translation model to the corpus used, and to handle words for which no translation is found. a) SINGLE variants of the model match each topic term in a language with at most one topic term in the other language.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Variants", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "MULTIPLE variants allow each term to match to multiple other words in the other language. b) INFER variants allow higher-likelihood matchings to be inferred from the data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Variants", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "STATIC variants treat the matchings as fixed, which is equivalent to assigning a probability of 0 or 1 to every edge in our bipartite graph C. c) RELEGATE variants relegate all unmatched words in each language to a single separate background topic distinct from the topics that are learned for the matched topic words. This is akin to forcing the probability for currently unmatched words to 0 in all topics except for one, and forcing the probability of all currently matched words to 0 in this topic.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Variants", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "INCLUDE variants do not restrict the assignment unmatched words; they are assigned to the same set of topics as the matched words.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Variants", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We test the following six variants: SINGLESTATI- We do not test MULTI-PLEINFER variants because of the complexity of inferring a multiple matching in a bipartite graph.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Variants", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "For all variants, a collapsed Gibbs sampler can be used to infer topics \u03c6 ,o and \u03c6 w , per-document topic distributions \u03b8, as well as topic assignments z and x. This corresponds to the S-step below. For INFER variants, we follow Boyd-Graber and Blei in using an M-step involving a bipartite graph matching algorithm to infer the matching m that maximizes the posterior likelihood of the matching. S-Step: Sample topics for words in the corpus using a collapsed Gibbs sampler. For topic-word w i = u belonging to document d, if the word occurs in concept c i = (u, v), then sample the topic and entry according to:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning & Inference", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "p(z i = k, c i = (u, v) | w i = u, z \u2212i , C) \u221d N dk + \u03b1 k j (N dj + \u03b1 j ) \u00d7 N k(u,v) + \u03b2 w k v N k(u,v ) + \u03b2 w k", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning & Inference", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "where the sum in the denominator of the first term is over all topics, and in the second term is over all words matched to u. N dk is the count of topic-words of topic", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning & Inference", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "k in document d, N k(u,v)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning & Inference", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "is the count of topic-words either of type u or of type v assigned to topic k in all the corpora. 2 For perspective-word o i = n, sample the topic according to:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning & Inference", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "p(z i = k|o i = n, z \u2212i , C) \u221d N dk j N dj \u00d7 N d kv + \u03b2 o k m N d km + \u03b2 o k", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning & Inference", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "where the sum in the second term of the denominator is over the perspective-word vocabulary of language d ; N dk is the count of topic words in document d with topic k; and N d km is the count of perspectiveword m being assigned topic k in language d . Note that in all the counts above, the current word token i is omitted from the count.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning & Inference", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Given our sampling assignments, we can then estimate \u03b8 d , \u03c6 ,o , and \u03c6 w as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning & Inference", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "\u03b8 kd = N dk + \u03b1 k k (N dk + \u03b1 k ) , \u03c6 w k(u,v) = N k(u,v) + \u03b2 w (u,v) v N k(u,v ) + \u03b2 w (u,v ) , \u03c6 ,o nk = N kn + \u03b2 o n m N km + \u03b2 o n .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning & Inference", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Step: (for INFER variants only): Run the Jonker-Volgenant (Jonker and Volgenant, 1987) bipartite matching algorithm to find the optimal matching C given some weights. For topic-term u from language a and topic-term v from language b, our weights correspond to the log of the posterior odds that the occurrences of u and v come from a matched topic distribution, as opposed to coming from unmatched distributions:", |
| "cite_spans": [ |
| { |
| "start": 58, |
| "end": 86, |
| "text": "(Jonker and Volgenant, 1987)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "M-", |
| "sec_num": null |
| }, |
| { |
| "text": "\u00b5 u,v = k\\{a * ,b * } N k(u,v) log\u03c6 w k(u,v) \u2212 N u log\u03c6 w k(u,\u2022) \u2212 N v log\u03c6 w k(\u2022,v) + \u03c0 u,v", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "M-", |
| "sec_num": null |
| }, |
| { |
| "text": ", where N u is the count of topic-term u in the corpus. This expression can also be interpreted as a kind of pointwise mutual information (Haghighi et al., 2008) . The Jonker-Volgenant algorithm has time complexity of at most O(V 3 ), where V is the size of the lexicon (Jonker and Volgenant, 1987) .", |
| "cite_spans": [ |
| { |
| "start": 138, |
| "end": 161, |
| "text": "(Haghighi et al., 2008)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 270, |
| "end": 298, |
| "text": "(Jonker and Volgenant, 1987)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "M-", |
| "sec_num": null |
| }, |
| { |
| "text": "Having learned our model and inferred how likely perspective-terms are for a topic in a given language, we seek to know whether these perspectives differ significantly in the two languages. More precisely, can we infer whether word m in language a and the equivalent word n in language b have significantly different distributions under a topic k? To do this, we make the assumption that the perspective-words in languages a and b are in one-to-one correspondence to each other. Recall that, for a given topic k and language , N km is the count for term m and \u03c6 ,o k,m is the probability for word m in language . Just as we collect the probabilities into word-topic distribution vectors \u03c6 ,o k , we collect the counts into word-topic count vectors [N k1 , N k2 , ..]. Then, since our model assumes a prior over the parameter vectors \u03c6 ,o k , we can infer the likelihood for that observed word-topic counts N a km and N b kn were drawn from a single word-topic-distribution prior denoted by\u03c6 := \u03c6 a,o km = \u03c6 b,o kn . Below all our probabilities are conditioned implicitly on this event as well as on N a k and N b k being fixed. Denote the total count of word tokens in topic k from language by N k = m N km . Now, we derive the probability that we observe a ratio greater than \u03b4 between the proportion of words in topic k that belong to word type m in language a and to corresponding word type n in language b:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inference of Perspective-Word Contrasts", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "p N a km N a k N b k N b kn \u2265 \u03b4 + p N b kn N b k N a k N a km \u2265 \u03b4", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Inference of Perspective-Word Contrasts", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "By symmetry, it suffices to derive an expression for the first term. We note that the inequality in the probability is equivalent to a sum over a range of values of N a km and N b kn . By rearranging terms, applying the law of conditional probability to condition on the term\u03c6, and exploiting the conditional independence of N a km and N b km given\u03c6, N a k , and N b k , we can rewrite this first term as", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inference of Perspective-Word Contrasts", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "N b k x=0 N a k y=x\u03b4N a/b p(N b kn = x|\u03c6)p(N a km = y|\u03c6)p(\u03c6)d\u03c6,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inference of Perspective-Word Contrasts", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "where", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inference of Perspective-Word Contrasts", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "N a/b = N a k N b k . Recall that \u03c6 ,o k \u223c Dir(\u03b2 o )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inference of Perspective-Word Contrasts", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "under our model. Assume a symmetric Dirichlet distribution for simplicity. It can then be shown that the marginal distribution of\u03c6 is\u03c6", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inference of Perspective-Word Contrasts", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "\u223c Beta(\u03b2 o , (V \u2212 1)\u03b2 o ),", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inference of Perspective-Word Contrasts", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "where V is the total size of the perspectiveword vocabulary. Similarly, it can be shown that the marginal distribution of", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inference of Perspective-Word Contrasts", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "N km given \u03c6 ,o k is N km \u223c Binom(N k , \u03c6 ,o", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inference of Perspective-Word Contrasts", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "i ) for \u2208 {a, b}. Therefore, the integrand above is proportional to the beta-binomial distribution with number of trials N a k + N b k , successes x + y, and parameters \u03b2 o and (", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inference of Perspective-Word Contrasts", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "V \u2212 1)\u03b2 o , but with partition function N a k y N b k x . Denote the PMF of this distribution by f (N a k +N b k , x+y, \u03b2 o ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inference of Perspective-Word Contrasts", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "Then expression (1) above becomes:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inference of Perspective-Word Contrasts", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "N b k x=0 N a k y=x\u03b4N a/b f (N a k + N b k , x + y, \u03b2 o ) + N a k x=0 N b k y=x\u03b4N b/a f (N a k + N b k , x + y, \u03b2 o ). (2)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inference of Perspective-Word Contrasts", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "We cannot observe N a kb , N b kn , N a k and N b k explicitly, but we can estimate them by obtaining posterior samples from our Gibbs sampler. We substitute these estimates into expression (2).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inference of Perspective-Word Contrasts", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "Twitter Data. We gathered Twitter data in English, Spanish and Russian during the first two weeks of December 2013 using the Twitter API. Following previous work (Puniyani et al., 2010) , we treated each Twitter user account as a document. We then tagged each document for part-ofspeech, and divided the word tokens in it into topicwords and perspective-words. We constructed a lexicon of 2,000 topic terms and 1,500 perspectiveterms for each language by filtering out any terms that occurred in more than 10% of the documents in that language, and then selecting the remaining terms with the highest frequency. Finally, we kept only documents that contained 4 or more topic words from our lexicon. This left us with 847,560 documents in English (4,742,868 topic-word and 1,907,685 perspective-word tokens); 756,036 documents in Spanish (4,409,888 topicword and 1,668,803 perspective-word tokens); and 260,981 documents in Russian (1,621,571 topicword and 981,561 perspective-word tokens). News Data. We gathered all the articles published online during the year 2013 by the state-run media agencies of the United States (Voice of America or \"VOA\"-English), Russia (RIA Novosti or \"RIA\"-Russian), and Venezuela (Agencia Venezolana de Noticias or \"AVN\"-Spanish). These three news agencies were chosen because they not only provide media in three distinct languages, but they are guided by the political world-views of three distinct governments. We treated each news article as a document, and removed duplicates. Once again, we constructed a lexicon of 2,000 topic terms and 1,500 perspective-terms using the same criteria as for Twitter, and kept only documents that contained 4 or more topic words from our lexicon. This left us with 23,159 articles (10,410,949 tokens) from VOA, 41,116 articles (11,726,637 tokens) from RIA, and 8,541 articles (2,606,796 tokens) from AVN.", |
| "cite_spans": [ |
| { |
| "start": 162, |
| "end": 185, |
| "text": "(Puniyani et al., 2010)", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments 4.1 Data", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Dictionaries. To create the translation dictionaries, we extracted translations from the English, Spanish, and Russian editions of Wiktionary, both from the translation sections and the gloss sections if the latter contained single words as glosses. Multiword expressions were universally removed. We added inverse translations for every original translation. From the resulting collection of translations, we then created separate translation dictionaries for each language and part-of-speech tag combination.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments 4.1 Data", |
| "sec_num": "4" |
| }, |
| { |
| "text": "In order to give preference to more important translations, we assigned each translation an initial weight of 1 + 1 r , where r was the rank of the translation within the page. Since a translation (or its inverse) can occur on multiple pages, we aggregated these initial weights and then assigned final weights of 1 + 1 r , where r was the rank after aggregation and sorting in descending order of weights.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments 4.1 Data", |
| "sec_num": "4" |
| }, |
| { |
| "text": "To evaluate the different variants of our model, we held out 30,000 documents (test set) during training. We plugged in the estimates of \u03c6 w and C acquired during training using the rest of the corpus to produce a likelihood estimate for these held-out documents. All models were initialized with the prior matching determined by the dictionary data. For each number of topics K, we set \u03b1 to 50/Kand the \u03b2 variables to 0.02, as in Fang et al. (2012) . For the MULTIPLE variants, we set \u03c0 i,j = 1 if i and j share an entry and 0 otherwise. For INFER variants, only three M -steps were performed to avoid overfitting, at 250, 500, and 750 iterations of Gibbs sampling, following the procedure in Boyd-Graber and Blei (2009) .", |
| "cite_spans": [ |
| { |
| "start": 431, |
| "end": 449, |
| "text": "Fang et al. (2012)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 694, |
| "end": 721, |
| "text": "Boyd-Graber and Blei (2009)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Conditions", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "In order to compare the variants of our model, we computed the perplexity and coherence for each variant on TWITTER and NEWS, for English-Spanish and English-Russian language pairs. Perplexity is a measure of how well a model trained on a training set predicts the co-occurrence of words on an unseen test set H. Lower perplexity indicates better model fit. We evaluate the held-out perplexity for topic words w i and perspective-words o i separately. For topic words, the perplexity is defined as exp(\u2212 w i \u2208H logp(w i )/N w ). As for standard LDA, exact inference of p(w i ) is intractable under this model. Therefore we adapted the estimator developed by Murray and Salakhutdinov (2009) to our models.", |
| "cite_spans": [ |
| { |
| "start": 658, |
| "end": 689, |
| "text": "Murray and Salakhutdinov (2009)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison of model variants", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Coherence is a measure inspired by pointwise mutual information (Newman et al., 2010) . Let D(v) be the the number of documents with at least one token of type v and let D(v, w) be the number of documents containing at least one token of type v and at least one token of type w. Then Mimno et al. (2011) define the coherence of topic k as", |
| "cite_spans": [ |
| { |
| "start": 64, |
| "end": 85, |
| "text": "(Newman et al., 2010)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 284, |
| "end": 303, |
| "text": "Mimno et al. (2011)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison of model variants", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "1 M 2 M m=2 m\u22121 =1 log D(v (k) m , v (k) ) + D(v (k) ) , where V (k) = (v (k) 1 , ..., v", |
| "eq_num": "(k)" |
| } |
| ], |
| "section": "Comparison of model variants", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "M ) is a list of the M most probable words in topic k and is a small smoothing constant used to avoid taking the logarithm of zero. Mimno et al. (2011) find that coherence correlates better with human judgments than do likelihoodbased measures. Coherence is topic-specific measure, so for each model variant we trained, we computed the median topic coherence across all the topics learned by the model. We set = 0.1. Model performance and analysis. Fig. 2 shows perplexity for the variants as a function of the number of iterations of Gibbs sampling on the English-Spanish NEWS corpus. The figure confirms that 1000 iterations of Gibbs sampling on the NEWS corpus was sufficient for convergence across model variants. We omit figures for English-Russian and for the TWITTER corpus, since the patterns were nearly identical. Figure 3 shows how perplexity varies as a function of the number of topics. We used this information to choose optimal models for the different corpora. The optimal number of topics was K = 175 for the English-Spanish NEWS corpus, K = 200 for the English-Russian NEWS, K = 325 for the English-Spanish TWITTER, and K = 300 for the English-Russian TWITTER. Although the optimal number of topics varied across corpora, the relative performance of the different models was the same. In all of our corpora, the MULTIPLE variants provided better fits than their corresponding SINGLE variants. There are several explanations for this. For one, the MULTIPLE variants are able to exploit the information from multiple translations, unlike the SINGLE variants, which discarded all but one translation per word. For another, the matchings produced by the SINGLEINFER variants can be purely coincidental and the result of overfitting (see some examples below). INCLUDE variants performed markedly better than RELEGATE variants. INFER variants improved model fit compared to STATIC variants, but required more topics to produce optimal fit.", |
| "cite_spans": [ |
| { |
| "start": 132, |
| "end": 151, |
| "text": "Mimno et al. (2011)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 449, |
| "end": 455, |
| "text": "Fig. 2", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 824, |
| "end": 832, |
| "text": "Figure 3", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Comparison of model variants", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Recall that we performed an M-step in the IN-FER variants 3 times, at 250, 500, and 750 iterations. As noted in \u00a73.3, the M-step in the INFER variants maximizes the posterior likelihood of the matching. However, Fig. 2 shows that this maximization causes held-out perplexity to increase substantially just after the first matching M-step, around 250 iterations, before decreasing again after about 50 more iterations of Gibbs sampling. We believe that this happens because the M-step is maximizing over expectations that are approximate, since they are estimated using Gibbs sampling. If the sampler has not yet converged, then the M-step's maximization will be unstable. We found support for this explanation when we re-ran the INFER variants using 1000 iterations between M-steps, giving the Markov chain enough time to converge. After this change, perplexity went down immediately after the M-step and kept decreasing monotonically, rather than increasing after the M-step before decreasing. However, this did not result in a significantly lower final perplexity or coherence and thus did not change the relative performance of the models. In addition, Fig. 2 suggests that the second and third M-steps (at 500 and 750 iterations, respectively) had little effect on perplexity. In light of the high computational expense of each inference step, this suggests in practice a single inference step may be sufficient. Fig. 4 shows that the MULTIPLESTATICINCLUDE variant was also the superior model as measured by median topic coherence. Once again, this general pattern held true for the English-Russian pair and TWITTER corpora. Overall, the results show that MULTIPLESTATICINCLUDE provides superior performance across measures, corpora, topic numbers, and languages. We therefore used this variant in further data analysis and evaluation. 
Incidentally, the observed decrease in topic coherence as K increases is expected, because as K increases, lowerlikelihood topics tend to be more incoherent (Mimno et al., 2011) . Experiments by Stevens et al. (2012) show that this effect is observed for LDA-, NMF-, and SVD-based topic models.", |
| "cite_spans": [ |
| { |
| "start": 1998, |
| "end": 2018, |
| "text": "(Mimno et al., 2011)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 2036, |
| "end": 2057, |
| "text": "Stevens et al. (2012)", |
| "ref_id": "BIBREF36" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 212, |
| "end": 218, |
| "text": "Fig. 2", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 1156, |
| "end": 1163, |
| "text": "Fig. 2", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 1418, |
| "end": 1424, |
| "text": "Fig. 4", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Comparison of model variants", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Cross-linguistic matchings.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison of model variants", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "The matchings inferred by the SINGLEINFERINCLUDE variant were of mixed quality. Some of the matchings corrected low-quality translations in the original dictionary. For instance, our prior dictionary matched passage in English to pasaje in Spanish. Though technically correct, the dominant meaning of pasaje is [travel] ticket. The TWITTER model correctly matched passage to ruta instead. Many of the matchings learned by the model did not provide technically correct translations, yet were still revelatory and interesting. For instance, the dictionary translated the Spanish word pito as cigarette in English. However, in informal usage this word refers specifically to cannabis cigarettes, not tobacco cigarettes. The TWITTER model matches pito to the English slang word weed instead. The Spanish word Siria (Syria) was unmatched in the prior dictionary; the NEWS model matched it to the word chemical, which makes sense in the context of extensive reporting of the usage of chemical weapons in the ongoing Syrian conflict.", |
| "cite_spans": [ |
| { |
| "start": 311, |
| "end": 319, |
| "text": "[travel]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison of model variants", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "We have conducted a qualitative analysis of the topics, perspectives and contrasts produced by our models for English-Spanish and English-Russian, TWITTER and NEWS datasets. While the topics were coherent and consistent across languages, sets of perspective words manifested systematic differences revealing interesting cross-cultural contrasts. Fig. 5 and 7 show the top perspective words discovered by the model for the topic of finance and economy in English and Spanish NEWS and TWITTER corpora, respectively. While some of the perspective words are neutral, mostly literal and occur in both English and Spanish (e.g. balance or authorize), many others represent metaphorical vocabulary (e.g. saddle, gut, evaporate in English, or incendiar, sangrar, abatir in Spanish) pointing at distinct models of conceptualization of the topic. When we applied the contrast detection method (described in \u00a73.4) to these perspective words, it highlighted the differences in metaphorical perspectives, rather than the literal ones, as shown in Fig. 6 and 8 . En- glish speakers tend to discuss economic and financial processes using motion terms, such as \"slow, drive, boost or sluggish\", or a related metaphor of horse-riding, e.g. \"rein in debt\", \"saddle with debt\", or even \"breed money\". In contrast, Spanish speakers tend to talk about the economy in terms of size rather than motion, using verbs such as ampliar or disminuir, and other metaphors, such as sangrar (to bleed) and incendiar (to light up). These examples demonstrate coherent conceptualization patterns that differ in the two languages. Interestingly, this difference manifested itself in both NEWS and TWITTER corpora and echoes the findings of a previous corpus-linguistic study of Charteris-Black and Ennis (2001) , who manually analysed metaphors used in English and Spanish financial discourse and reported that motion and navigation metaphors that abound in English were rarely observed in Spanish.", |
| "cite_spans": [ |
| { |
| "start": 1743, |
| "end": 1775, |
| "text": "Charteris-Black and Ennis (2001)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 346, |
| "end": 358, |
| "text": "Fig. 5 and 7", |
| "ref_id": "FIGREF4" |
| }, |
| { |
| "start": 1034, |
| "end": 1046, |
| "text": "Fig. 6 and 8", |
| "ref_id": "FIGREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data analysis and discussion", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "For the majority of the topics we analysed the model revealed interesting cross-cultural differences. For instance, the Spanish corpora exhibited metaphors of battle when talking about poverty (with poverty seen as an enemy), while in the English corpus poverty was discussed more neutrally as a social problem that needs a practical solution. English-Russian NEWS experiments revealed a surprising difference with respect to the topic of protests. They suggested that while US media tend to use stronger metaphorical vocabulary, such as Topic EN budget debt deficit reduction spend balance cut increase limit downtown tax stress addition planet Topic ES presupuesto deficit deuda reduccion equilibrio disminucion gasto aumentacion tasa sacerdote Perspective EN balance default triple rein accumulate accrue trim incur saddle slash prioritize avert gut burden evaporate borrow pile cap cut tackle Perspective ES renegociar mejora etiquetado desplomar recortar endeudar incendiar destinar asignar autorizar aprobado ascender sangrar augurar abatir clash, erupt or fire, in Russian protests are discussed more neutrally. Generally, the NEWS corpora contained more abstract topics and richer information about conceptual structure and sentiment in all languages. Many of the topics discovered in TWIT-TER related to everyday concepts, such as pets or concerts, with fewer topics covering societal issues. Yet, a few TWITTER-specific contrasts could be observed: e.g., the sports topic tends to be discussed using war and battle vocabulary in Russian to a greater extent than in English.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data analysis and discussion", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Our models tend to identify two general kinds of differences: (1) cross-corpus differences representing world views of particular populations whom the corpora characterize (such differences exist both across and within languages, e.g. the metaphors used in the progressive New York Times would be different from the ones in the more conservative Wall Street Journal); and (2) deeply entrenched crosslinguistic differences, such as the motion versus expansion metaphors for the economy in English and Spanish. Such systematic cross-linguistic contrasts can be associated with contrastive behavioural patterns across the different linguistic communities (Casasanto and Boroditsky, 2008; Fuhrman et al., 2011) . In both NEWS and TWITTER data, our model effectively identifies and summarises such contrasts simplifying the manual analysis of the data Topic EN economy growth rate percent bank economist interest reserve market policy Topic ES econom\u00eda crecimiento tasa banco poltica mercado inter\u00e9s inflacin empleo economista Perspective EN economic financial grow global expect remain cut boost low slow drive Perspective ES econ\u00f3mico mundial agregar financiero informal peque\u00f1o significar interno bajar Figure 7 : Top perspectives in system output for the economy topic in TWITTER (metaphors in red). by highlighting linguistic trends that are indicative of the underlying conceptual differences. However, the conceptual differences are not straightforward to evaluate based on the surface vocabulary alone. In order to investigate this further, we conducted a behavioural experiment testing a subset of the contrasts discovered by our model.", |
| "cite_spans": [ |
| { |
| "start": 652, |
| "end": 684, |
| "text": "(Casasanto and Boroditsky, 2008;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 685, |
| "end": 706, |
| "text": "Fuhrman et al., 2011)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1201, |
| "end": 1209, |
| "text": "Figure 7", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data analysis and discussion", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "We assessed the relevance of the contrasts through an experimental study with native English-speaking and native Spanish-speaking human subjects. We focused on a linguistic difference in the metaphors used by English speakers versus Spanish speakers when discussing changes in a nation's economy. While English speakers tend to use metaphors involving both locative motion verbs (e.g. slow) as well as expansive/contractive motion verbs (e.g. shrink), Spanish speakers preferentially employ expansive/contractive motion verbs (e.g. disminuir) to describe changes in the economy. These differences could reflect linguistic artefacts (such as collocation frequencies) or could reflect entrenched conceptual differences. Our experiment addresses the question of whether such patterns of behaviour arise crosslinguistically in response to non-linguistic stimuli. If the linguistic differences are indicative of entrenched conceptual differences, then we expect to see responses to the non-linguistic stimuli that correspond to the usage differences in the two languages.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Behavioural evaluation", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We recruited 60 participants from one Englishspeaking country (the US) and 60 participants from three Spanish-speaking countries (Chile, Mexico, and Spain) using the CrowdFlower crowdsourcing platform. Participants first read a brief description of the experimental task, which introduced them to a fictional country in which economists are devising a simple but effective graphic for \"representing change in [the] economy\". They then completed a demographic questionnaire including information about their native language. Results from 9 US and 3 non-US participants were discarded for failure to meet the language requirement.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental setup", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Participants navigated to a new page to complete the experimental task. Stimuli were presented in a 1200 \u00d7 700-pixel frame. The center of the frame contained a sphere with a 64-pixel diameter. For each trial, participants clicked on a button to activate an animation of the sphere which involved (1) a positive displacement (in rightward pixels) of 10% or 20%, or a negative displacement (in leftward pixels) of 10% or 20%; 3 and, (2) an expansion (in increased pixel diameter) of 10% or 20%, or a contraction (in decreased pixel diameter) of 10% or 20%. 4 Participants saw each of the resulting conditions 3 times. The displacement and size conditions were drawn from a random permutation of 16 conditions using a Fisher-Yates shuffle (Fisher and Yates, 1963) . Crucially, half of the stimuli contained conflicts of information with respect to the size and displacement metaphors for economic change (e.g. the sphere could both grow and move to the left). Overall we expected the Spanish speakers' responses to be more closely associated with changes in diameter due to the presence and salience of the size metaphor, and the English speakers' responses to be influenced by both conditions. We expected these differences to be most prominent in the con- flicting trials, which force English speakers (unlike Spanish speakers) to choose between two available metaphors. We focus on these conflicting trials in our analysis and discussion of the results.", |
| "cite_spans": [ |
| { |
| "start": 555, |
| "end": 556, |
| "text": "4", |
| "ref_id": null |
| }, |
| { |
| "start": 736, |
| "end": 760, |
| "text": "(Fisher and Yates, 1963)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental setup", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "In trials in which stimuli moving rightward were simultaneously contracting, English speakers responded that the economy improved 66% of the time, whereas Spanish speakers judged the economy to have improved 43% of the time. In trials in which stimuli moving leftward were simultaneously expanding, English speakers judged the economy to have improved 34% of the time, and Spanish speakers responded that the economy improved 55% of the time. The results are illustrated in Figure 9 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 474, |
| "end": 482, |
| "text": "Figure 9", |
| "ref_id": "FIGREF7" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "These results indicate three effects: (1) English speakers exhibit a pronounced bias for using horizontal displacement rather than expansion/contraction during the decision-making process; (2) Spanish speakers are more biased toward expansion/contraction in formulating a decision; and, (3) across the two languages the responses show contrasting patterns. The results support our expectation on the relevance of different metaphors when reasoning about the economy by the English and Spanish speakers.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "To examine the significance of these effects, we fit a binary logit mixed effects model 5 to the data. The full analysis modeled judgment with native language, displacement, and size as fully crossed fixed effects and participant as a random effect. This analysis confirmed that native language was associated with judgments about economic change. In particular, it indicated that changes in size affected English speakers' judgments and Spanish speakers' judgments differently (p < 0.001), with an increase in size increasing the odds (e \u03b2 = 2.5) of a judgment of IMPROVED by Spanish speakers and decreasing the odds (e \u03b2 = 0.44) of a judgment of IMPROVED by English speakers. A Type II Wald test revealed the interaction between language and size to be highly statistically significant (\u03c7 2 (1) < 0.001).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "In summary, the patterns we see in the behavioural data are consistent with the patterns uncovered in the output of our model. While much territory remains to be investigated to delimit the nature of this relationship, our results represent a first step toward establishing an association between information mined from large textual data collections and information observed through behavioural responses on a human scale.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "We presented the first model that detects common topics from multilingual, non-parallel data and automatically uncovers differences in perspectives on these topics across linguistic communities. Our data analysis and behavioural evaluation offer evidence of a symbiotic relationship between ecologically sound corpus experiments and scientifically controlled human subject experiments, paving the way for the use of large-scale text mining to inform cognitive linguistics and psychology research.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "We believe that our model represents a good foundation for future projects in this area. A promising area for further work is in developing better methods for identifying contrasts in perspective terms. This could perhaps involve modifying the generative process for perspective terms or incorporating syntactic dependency information. It would also be interesting to investigate the effect of dictionary quality and corpus size on the relative performance of STATIC and INFER variants. Finally, we note that the model can be applied to identify contrastive perspectives in monolingual as well as multilingual data, providing a general tool for the analysis of subtle, yet important, cross-population differences.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "This approximation was adopted for convenience, computational efficiency and ease of interpretation. However, in principle our method does not depend on it, since it can be applied with all content words as topic or perspective words.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "In RELEGATE variants, for u unmatched zi is sampled as:p(zi = k|wi = u, z\u2212i, C) \u221d N dk + \u03b1 k k (N dk + \u03b1 k ) ,which can be seen as \u03b2 w u\u2022 \u2192 \u221e for unmatched terms.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "The use of leftward/rightward horizontal displacement to represent decreases/increases in magnitude is supported by research in numerical cognition showing that people associate smaller magnitudes with the left side of space and larger magnitudes with the right side(Dehaene, 1992;Fias et al., 1995).4 A demonstration of the English experimental interface can be accessed at http://goo.gl/W3YVfC. The Spanish interface is identical, but for a direct translation of the guidelines provided by a native Spanish/fluent English speaker.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "See Fox and Weisberg (2011) for a discussion of such models including application of the Type II Wald test.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We would like to thank the anonymous reviewers as well as the TACL editors, Sharon Goldwater and David Chiang, for helpful comments on an earlier draft of this paper. This work used the Extreme Science and Engineering Discovery Environment (XSEDE), which is supported by National Science Foundation grant number ACI-1053575. Ekaterina Shutova's research is supported by the Leverhulme Trust Early Career Fellowship. Gerard de Melo's research is supported by China 973 Program Grants 2011CBA00300, 2011CBA00301, and NSFC Grants 61033001, 61361136003, 61550110504.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Subgroup detection in ideological discussions", |
| "authors": [ |
| { |
| "first": "Amjad", |
| "middle": [], |
| "last": "Abu-Jbara", |
| "suffix": "" |
| }, |
| { |
| "first": "Mona", |
| "middle": [], |
| "last": "Diab", |
| "suffix": "" |
| }, |
| { |
| "first": "Pradeep", |
| "middle": [], |
| "last": "Dasigi", |
| "suffix": "" |
| }, |
| { |
| "first": "Dragomir", |
| "middle": [], |
| "last": "Radev", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics: Long Papers", |
| "volume": "1", |
| "issue": "", |
| "pages": "399--409", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Amjad Abu-Jbara, Mona Diab, Pradeep Dasigi, and Dragomir Radev. 2012. Subgroup detection in ideo- logical discussions. In Proceedings of the 50th Annual Meeting of the Association for Computational Linguis- tics: Long Papers -Volume 1, ACL '12, pages 399- 409, Stroudsburg, PA, USA. Association for Compu- tational Linguistics.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Staying informed: Supervised and semi-supervised multi-view topical analysis of ideological perspective", |
| "authors": [ |
| { |
| "first": "Amr", |
| "middle": [], |
| "last": "Ahmed", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [ |
| "P" |
| ], |
| "last": "Xing", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 2010 Conference on Empirical Methods in Natural Language Processing, EMNLP '10", |
| "volume": "", |
| "issue": "", |
| "pages": "1140--1150", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Amr Ahmed and Eric P. Xing. 2010. Staying in- formed: Supervised and semi-supervised multi-view topical analysis of ideological perspective. In Pro- ceedings of the 2010 Conference on Empirical Meth- ods in Natural Language Processing, EMNLP '10, pages 1140-1150, Stroudsburg, PA, USA. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "OpinioNetIt: Understanding the Opinions-People network for politically controversial topics", |
| "authors": [ |
| { |
| "first": "Rawia", |
| "middle": [], |
| "last": "Awadallah", |
| "suffix": "" |
| }, |
| { |
| "first": "Maya", |
| "middle": [], |
| "last": "Ramanath", |
| "suffix": "" |
| }, |
| { |
| "first": "Gerhard", |
| "middle": [], |
| "last": "Weikum", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 20th ACM International Conference on Information and Knowledge Management, CIKM '11", |
| "volume": "", |
| "issue": "", |
| "pages": "2481--2484", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rawia Awadallah, Maya Ramanath, and Gerhard Weikum. 2011. OpinioNetIt: Understanding the Opinions-People network for politically controversial topics. In Proceedings of the 20th ACM International Conference on Information and Knowledge Manage- ment, CIKM '11, pages 2481-2484, New York, NY, USA. ACM.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Latent Dirichlet allocation", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [ |
| "M" |
| ], |
| "last": "Blei", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [ |
| "I" |
| ], |
| "last": "Jordan", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "3", |
| "issue": "", |
| "pages": "993--1022", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David M. Blei, Andrew Y. Ng, and Michael I. Jordan. 2003. Latent Dirichlet allocation. Journal of Machine Learning Research, 3:993-1022.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Multilingual topic models for unaligned text", |
| "authors": [ |
| { |
| "first": "Jordan", |
| "middle": [], |
| "last": "Boyd-Graber", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "M" |
| ], |
| "last": "Blei", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the Twenty-Fifth Conference on Uncertainty in Artificial Intelligence (UAI '09)", |
| "volume": "", |
| "issue": "", |
| "pages": "75--82", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jordan Boyd-Graber and David M. Blei. 2009. Multilin- gual topic models for unaligned text. In Proceedings of the Twenty-Fifth Conference on Uncertainty in Ar- tificial Intelligence (UAI '09), pages 75-82. Arlington, VA, USA: AUAI Press.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Holistic sentiment analysis across languages: multilingual supervised latent Dirichlet allocation", |
| "authors": [ |
| { |
| "first": "Jordan", |
| "middle": [], |
| "last": "Boyd-Graber", |
| "suffix": "" |
| }, |
| { |
| "first": "Philip", |
| "middle": [], |
| "last": "Resnik", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 2010 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "45--55", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jordan Boyd-Graber and Philip Resnik. 2010. Holistic sentiment analysis across languages: multilingual su- pervised latent Dirichlet allocation. In Proceedings of the 2010 Conference on Empirical Methods in Natural Language Processing, pages 45-55.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Time in the mind: Using space to think about time", |
| "authors": [ |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Casasanto", |
| "suffix": "" |
| }, |
| { |
| "first": "Lera", |
| "middle": [], |
| "last": "Boroditsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Cognition", |
| "volume": "106", |
| "issue": "2", |
| "pages": "579--593", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Daniel Casasanto and Lera Boroditsky. 2008. Time in the mind: Using space to think about time. Cognition, 106(2):579-593.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "A comparative study of metaphor in Spanish and English financial reporting. English for Specific Purposes", |
| "authors": [ |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "Charteris-Black", |
| "suffix": "" |
| }, |
| { |
| "first": "Timothy", |
| "middle": [], |
| "last": "Ennis", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "", |
| "volume": "20", |
| "issue": "", |
| "pages": "249--266", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jonathan Charteris-Black and Timothy Ennis. 2001. A comparative study of metaphor in Spanish and En- glish financial reporting. English for Specific Pur- poses, 20:249-266.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Varieties of numerical abilities", |
| "authors": [ |
| { |
| "first": "Stanislas", |
| "middle": [], |
| "last": "Dehaene", |
| "suffix": "" |
| } |
| ], |
| "year": 1992, |
| "venue": "Cognition", |
| "volume": "44", |
| "issue": "", |
| "pages": "1--42", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stanislas Dehaene. 1992. Varieties of numerical abili- ties. Cognition, 44:1-42.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "MavenRank: Identifying influential members of the US senate using lexical centrality", |
| "authors": [ |
| { |
| "first": "Anthony", |
| "middle": [], |
| "last": "Fader", |
| "suffix": "" |
| }, |
| { |
| "first": "Dragomir", |
| "middle": [], |
| "last": "Radev", |
| "suffix": "" |
| }, |
| { |
| "first": "Burt", |
| "middle": [ |
| "L" |
| ], |
| "last": "Monroe", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [ |
| "M" |
| ], |
| "last": "Quinn", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the 2007 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "658--666", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anthony Fader, Dragomir Radev, Burt L. Monroe, and Kevin M. Quinn. 2007. MavenRank: Identifying in- fluential members of the US senate using lexical cen- trality. In In Proceedings of the 2007 Joint Conference on Empirical Methods in Natural Language Process- ing and Computational Natural Language Learning, pages 658-666.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Mining contrastive opinions on political texts using cross-perspective topic model", |
| "authors": [ |
| { |
| "first": "Yi", |
| "middle": [], |
| "last": "Fang", |
| "suffix": "" |
| }, |
| { |
| "first": "Luo", |
| "middle": [], |
| "last": "Si", |
| "suffix": "" |
| }, |
| { |
| "first": "Naveen", |
| "middle": [], |
| "last": "Somasundaram", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhengtao", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the Fifth ACM International Conference on Web Search and Data Mining (WSDM '12)", |
| "volume": "", |
| "issue": "", |
| "pages": "63--72", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yi Fang, Luo Si, Naveen Somasundaram, and Zheng- tao Yu. 2012. Mining contrastive opinions on polit- ical texts using cross-perspective topic model. In Pro- ceedings of the Fifth ACM International Conference on Web Search and Data Mining (WSDM '12), pages 63-72, New York. New York: ACM.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "The importance of magnitude information in numerical processing: evidence from the SNARC effect", |
| "authors": [ |
| { |
| "first": "Wim", |
| "middle": [], |
| "last": "Fias", |
| "suffix": "" |
| }, |
| { |
| "first": "Marc", |
| "middle": [], |
| "last": "Brysbaert", |
| "suffix": "" |
| }, |
| { |
| "first": "Frank", |
| "middle": [], |
| "last": "Geypens", |
| "suffix": "" |
| }, |
| { |
| "first": "G\u00e9ry", |
| "middle": [], |
| "last": "d'Ydewalle", |
| "suffix": "" |
| } |
| ], |
| "year": 1995, |
| "venue": "Mathematical Cognition", |
| "volume": "2", |
| "issue": "1", |
| "pages": "95--110", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wim Fias, Marc Brysbaert, Frank Geypens, and G\u00e9ry d'Ydewalle. 1995. The importance of magnitude information in numerical processing: evidence from the SNARC effect. Mathematical Cognition, 2(1):95- 110.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Statistical Tables for Biological, Agricultural and Medical Research", |
| "authors": [ |
| { |
| "first": "Ronald", |
| "middle": [ |
| "A" |
| ], |
| "last": "Fisher", |
| "suffix": "" |
| }, |
| { |
| "first": "Frank", |
| "middle": [], |
| "last": "Yates", |
| "suffix": "" |
| } |
| ], |
| "year": 1963, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ronald A. Fisher and Frank Yates. 1963. Statistical Tables for Biological, Agricultural and Medical Re- search. Oliver and Boyd, Edinburgh.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "An R Companion to Applied Regression", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Fox", |
| "suffix": "" |
| }, |
| { |
| "first": "Sanford", |
| "middle": [], |
| "last": "Weisberg", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John Fox and Sanford Weisberg. 2011. An R Companion to Applied Regression. SAGE Publications, CA: Los Angeles.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "How linguistic and cultural forces shape conceptions of time: English and Mandarin time in 3D", |
| "authors": [ |
| { |
| "first": "Orly", |
| "middle": [], |
| "last": "Fuhrman", |
| "suffix": "" |
| }, |
| { |
| "first": "Kelly", |
| "middle": [], |
| "last": "Mccormick", |
| "suffix": "" |
| }, |
| { |
| "first": "Eva", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Heidi", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Dingfang", |
| "middle": [], |
| "last": "Shu", |
| "suffix": "" |
| }, |
| { |
| "first": "Shuaimei", |
| "middle": [], |
| "last": "Mao", |
| "suffix": "" |
| }, |
| { |
| "first": "Lera", |
| "middle": [], |
| "last": "Boroditsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Cognitive Science", |
| "volume": "35", |
| "issue": "", |
| "pages": "1305--1328", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Orly Fuhrman, Kelly McCormick, Eva Chen, Heidi Jiang, Dingfang Shu, Shuaimei Mao, and Lera Boroditsky. 2011. How linguistic and cultural forces shape conceptions of time: English and Mandarin time in 3D. Cognitive Science, 35:1305-1328.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Modeling latent biographic attributes in conversational genres", |
| "authors": [ |
| { |
| "first": "Nikesh", |
| "middle": [], |
| "last": "Garera", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Yarowsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the Joint Conference of the 47th Annual Meeting of the ACL and the 4th International Joint Conference on Natural Language Processing of the AFNLP", |
| "volume": "2", |
| "issue": "", |
| "pages": "710--718", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nikesh Garera and David Yarowsky. 2009. Model- ing latent biographic attributes in conversational gen- res. In Proceedings of the Joint Conference of the 47th Annual Meeting of the ACL and the 4th Interna- tional Joint Conference on Natural Language Process- ing of the AFNLP: Volume 2 -Volume 2, ACL '09, pages 710-718, Stroudsburg, PA, USA. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Predicting legislative roll calls from text", |
| "authors": [ |
| { |
| "first": "Sean", |
| "middle": [ |
| "M" |
| ], |
| "last": "Gerrish", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "M" |
| ], |
| "last": "Blei", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of ICML", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sean M. Gerrish and David M. Blei. 2011. Predict- ing legislative roll calls from text. In Proceedings of ICML.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Learning topics and positions from Debatepedia", |
| "authors": [ |
| { |
| "first": "Swapna", |
| "middle": [], |
| "last": "Gottipati", |
| "suffix": "" |
| }, |
| { |
| "first": "Minghui", |
| "middle": [], |
| "last": "Qiu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yanchuan", |
| "middle": [], |
| "last": "Sim", |
| "suffix": "" |
| }, |
| { |
| "first": "Jing", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1858--1868", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Swapna Gottipati, Minghui Qiu, Yanchuan Sim, Jing Jiang, and Noah A. Smith. 2013. Learning topics and positions from Debatepedia. In Proceedings of the 2013 Conference on Empirical Methods in Natu- ral Language Processing, pages 1858-1868, Seattle, Washington, USA, October. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Learning bilingual lexicons from monolingual corpora", |
| "authors": [ |
| { |
| "first": "Aria", |
| "middle": [], |
| "last": "Haghighi", |
| "suffix": "" |
| }, |
| { |
| "first": "Percy", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| }, |
| { |
| "first": "Taylor", |
| "middle": [], |
| "last": "Berg-Kirkpatrick", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of the 46th Annual Meeting of the Association for Computational Linguistics, ACL-'08:HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "771--779", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aria Haghighi, Percy Liang, Taylor Berg-Kirkpatrick, and Dan Klein. 2008. Learning bilingual lexicons from monolingual corpora. In Proceedings of the 46th Annual Meeting of the Association for Computational Linguistics, ACL-'08:HLT, pages 771-779, Colum- bus, Ohio, USA.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Studying the history of ideas using topic models", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Hall", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of the 2008 Conference on Empirical Methods in Natural Language processing", |
| "volume": "", |
| "issue": "", |
| "pages": "363--371", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Hall, Daniel Jurafsky, and Christopher D. Man- ning. 2008. Studying the history of ideas using topic models. In Proceedings of the 2008 Conference on Empirical Methods in Natural Language processing, pages 363-371. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Extracting multilingual topics from unaligned comparable corpora", |
| "authors": [ |
| { |
| "first": "Jagadeesh", |
| "middle": [], |
| "last": "Jagarlamudi", |
| "suffix": "" |
| }, |
| { |
| "first": "Hal", |
| "middle": [], |
| "last": "Daum\u00e9", |
| "suffix": "III" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 32nd European Conference on Advances in Information Retrieval (ECIR'2010)", |
| "volume": "", |
| "issue": "", |
| "pages": "444--456", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jagadeesh Jagarlamudi and Hal Daum\u00e9 III. 2010. Ex- tracting multilingual topics from unaligned compara- ble corpora. In Cathal Gurrin, Yulan He, Gabriella Kazai, Udo Kruschwitz, and Suzanne Little, editors, Proceedings of the 32nd European Conference on Ad- vances in Information Retrieval (ECIR'2010), pages 444-456. Springer-Verlag, Berlin.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "I know what you did last summer\": Query logs and user privacy", |
| "authors": [ |
| { |
| "first": "Rosie", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Ravi", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| }, |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Pang", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Tomkins", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the Sixteenth ACM Conference on Conference on Information and Knowledge Management, CIKM '07", |
| "volume": "", |
| "issue": "", |
| "pages": "909--914", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rosie Jones, Ravi Kumar, Bo Pang, and Andrew Tomkins. 2007. \"I know what you did last summer\": Query logs and user privacy. In Proceedings of the Six- teenth ACM Conference on Conference on Information and Knowledge Management, CIKM '07, pages 909- 914, New York, NY, USA. ACM.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "A shortest augmenting path algorithm for dense and sparse linear assignment problems", |
| "authors": [ |
| { |
| "first": "Roy", |
| "middle": [], |
| "last": "Jonker", |
| "suffix": "" |
| }, |
| { |
| "first": "Anton", |
| "middle": [], |
| "last": "Volgenant", |
| "suffix": "" |
| } |
| ], |
| "year": 1987, |
| "venue": "Computing", |
| "volume": "38", |
| "issue": "4", |
| "pages": "325--340", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Roy Jonker and Anton Volgenant. 1987. A shortest aug- menting path algorithm for dense and sparse linear as- signment problems. Computing, 38(4):325-340.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Introduction: Cultural variation in metaphor", |
| "authors": [ |
| { |
| "first": "Zolt\u00e1n", |
| "middle": [], |
| "last": "K\u00f6vecses", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "European Journal of English Studies", |
| "volume": "8", |
| "issue": "", |
| "pages": "263--274", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zolt\u00e1n K\u00f6vecses. 2004. Introduction: Cultural varia- tion in metaphor. European Journal of English Stud- ies, 8:263-274.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "The Little Blue Book: The Essential Guide to Thinking and Talking Democratic", |
| "authors": [ |
| { |
| "first": "George", |
| "middle": [], |
| "last": "Lakoff", |
| "suffix": "" |
| }, |
| { |
| "first": "Elisabeth", |
| "middle": [], |
| "last": "Wehling", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "George Lakoff and Elisabeth Wehling. 2012. The Lit- tle Blue Book: The Essential Guide to Thinking and Talking Democratic. Free Press, New York.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Polylingual topic models", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Mimno", |
| "suffix": "" |
| }, |
| { |
| "first": "Hanna", |
| "middle": [ |
| "M" |
| ], |
| "last": "Wallach", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Naradowsky", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Mccallum", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the 2009 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "2", |
| "issue": "", |
| "pages": "880--889", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Mimno, Hanna M. Wallach, Jason Naradowsky, David A. Smith, and Andrew McCallum. 2009. Polylingual topic models. In Proceedings of the 2009 Conference on Empirical Methods in Natural Lan- guage Processing: Volume 2, pages 880-889. Asso- ciation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Optimizing semantic coherence in topic models", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Mimno", |
| "suffix": "" |
| }, |
| { |
| "first": "Hanna", |
| "middle": [ |
| "M" |
| ], |
| "last": "Wallach", |
| "suffix": "" |
| }, |
| { |
| "first": "Edmund", |
| "middle": [], |
| "last": "Talley", |
| "suffix": "" |
| }, |
| { |
| "first": "Miriam", |
| "middle": [], |
| "last": "Leenders", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Mccallum", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Mimno, Hanna M. Wallach, Edmund Talley, Miriam Leenders, and Andrew McCallum. 2011. Op- timizing semantic coherence in topic models. In Pro- ceedings of the 2011 Conference on Empirical Meth- ods in Natural Language Processing. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Fightin' words: Lexical feature selection and evaluation for identifying the content of political conflict", |
| "authors": [ |
| { |
| "first": "Burt", |
| "middle": [ |
| "L" |
| ], |
| "last": "Monroe", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [ |
| "P" |
| ], |
| "last": "Colaresi", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [ |
| "M" |
| ], |
| "last": "Quinn", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Political Analysis", |
| "volume": "16", |
| "issue": "4", |
| "pages": "372--403", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Burt L. Monroe, Michael P. Colaresi, and Kevin M. Quinn. 2008. Fightin' words: Lexical feature selec- tion and evaluation for identifying the content of polit- ical conflict. Political Analysis, 16(4):372-403.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Evaluating probabilities under high-dimensional latent variable models", |
| "authors": [ |
| { |
| "first": "Iain", |
| "middle": [], |
| "last": "Murray", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruslan", |
| "middle": [ |
| "R" |
| ], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "1137--1144", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Iain Murray and Ruslan R. Salakhutdinov. 2009. Evalu- ating probabilities under high-dimensional latent vari- able models. In Advances in Neural Information Pro- cessing Systems, pages 1137-1144.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Automatic evaluation of topic coherence", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Newman", |
| "suffix": "" |
| }, |
| { |
| "first": "Jey", |
| "middle": [ |
| "Han" |
| ], |
| "last": "Lau", |
| "suffix": "" |
| }, |
| { |
| "first": "Karl", |
| "middle": [], |
| "last": "Grieser", |
| "suffix": "" |
| }, |
| { |
| "first": "Timothy", |
| "middle": [], |
| "last": "Baldwin", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 2010 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Newman, Jey Han Lau, Karl Grieser, and Timothy Baldwin. 2010. Automatic evaluation of topic coher- ence. In Proceedings of the 2010 Conference of the North American Chapter of the Association for Com- putational Linguistics: Human Language Technolo- gies. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Latent variable models of selectional preference", |
| "authors": [ |
| { |
| "first": "Diarmuid", |
| "middle": [ |
| "\u00d3" |
| ], |
| "last": "S\u00e9aghdha", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 48th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "435--444", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diarmuid\u00d3 S\u00e9aghdha. 2010. Latent variable models of selectional preference. In Proceedings of the 48th Annual Meeting of the Association for Computational Linguistics, pages 435-444, Uppsala, Sweden. Asso- ciation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Cross-cultural analysis of blogs and forums with mixed-collection topic models", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Paul", |
| "suffix": "" |
| }, |
| { |
| "first": "Roxana", |
| "middle": [], |
| "last": "Girju", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the 2009 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "3", |
| "issue": "", |
| "pages": "1408--1417", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael Paul and Roxana Girju. 2009. Cross-cultural analysis of blogs and forums with mixed-collection topic models. In Proceedings of the 2009 Conference on Empirical Methods in Natural Language Process- ing: Volume 3 -Volume 3, EMNLP '09, pages 1408- 1417, Stroudsburg, PA, USA. Association for Compu- tational Linguistics.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Democrats, Republicans and Starbucks afficionados: user classification in Twitter", |
| "authors": [ |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Pennacchiotti", |
| "suffix": "" |
| }, |
| { |
| "first": "Ana-Maria", |
| "middle": [], |
| "last": "Popescu", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 17th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, KDD '11", |
| "volume": "", |
| "issue": "", |
| "pages": "430--438", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marco Pennacchiotti and Ana-Maria Popescu. 2011. Democrats, Republicans and Starbucks afficionados: user classification in Twitter. In Proceedings of the 17th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, KDD '11, pages 430-438.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Social links from latent topics in microblogs", |
| "authors": [ |
| { |
| "first": "Kriti", |
| "middle": [], |
| "last": "Puniyani", |
| "suffix": "" |
| }, |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Eisenstein", |
| "suffix": "" |
| }, |
| { |
| "first": "Shay", |
| "middle": [], |
| "last": "Cohen", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [ |
| "P" |
| ], |
| "last": "Xing", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the NAACL/HLT 2010 Workshop on Computational Linguistics in a World of Social Media", |
| "volume": "", |
| "issue": "", |
| "pages": "19--20", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kriti Puniyani, Jacob Eisenstein, Shay Cohen, and Eric P. Xing. 2010. Social links from latent topics in mi- croblogs. In Proceedings of the NAACL/HLT 2010 Workshop on Computational Linguistics in a World of Social Media, pages 19-20. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "A latent variable model for viewpoint discovery from threaded forum posts", |
| "authors": [ |
| { |
| "first": "Minghui", |
| "middle": [], |
| "last": "Qiu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jing", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 2013 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "1031--1040", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Minghui Qiu and Jing Jiang. 2013. A latent variable model for viewpoint discovery from threaded forum posts. In Proceedings of the 2013 Conference of the North American Chapter of the Association for Com- putational Linguistics: Human Language Technolo- gies, pages 1031-1040, Atlanta, Georgia, June. Asso- ciation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "A latent Dirichlet allocation method for selectional preferences", |
| "authors": [ |
| { |
| "first": "Alan", |
| "middle": [], |
| "last": "Ritter", |
| "suffix": "" |
| }, |
| { |
| "first": "Mausam", |
| "middle": [], |
| "last": "Etzioni", |
| "suffix": "" |
| }, |
| { |
| "first": "Oren", |
| "middle": [], |
| "last": "Etzioni", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 48th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "424--434", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alan Ritter, Mausam Etzioni, and Oren Etzioni. 2010. A latent Dirichlet allocation method for selectional pref- erences. In Proceedings of the 48th Annual Meeting of the Association for Computational Linguistics, pages 424-434. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Exploring topic coherence over many models and many topics", |
| "authors": [ |
| { |
| "first": "Keith", |
| "middle": [], |
| "last": "Stevens", |
| "suffix": "" |
| }, |
| { |
| "first": "Philip", |
| "middle": [], |
| "last": "Kegelmeyer", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Andrzejewski", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Buttler", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "952--961", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Keith Stevens, Philip Kegelmeyer, David Andrzejewski, and David Buttler. 2012. Exploring topic coherence over many models and many topics. In Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning, pages 952-961, Jeju Is- land, Korea.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Metaphors we think with: The role of metaphor in reasoning", |
| "authors": [ |
| { |
| "first": "Paul", |
| "middle": [ |
| "H" |
| ], |
| "last": "Thibodeau", |
| "suffix": "" |
| }, |
| { |
| "first": "Lera", |
| "middle": [], |
| "last": "Boroditsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "PLoS ONE", |
| "volume": "6", |
| "issue": "2", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Paul H. Thibodeau and Lera Boroditsky. 2011. Metaphors we think with: The role of metaphor in rea- soning. PLoS ONE, 6(2):e16782.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Learning subjective language", |
| "authors": [ |
| { |
| "first": "Janyce", |
| "middle": [], |
| "last": "Wiebe", |
| "suffix": "" |
| }, |
| { |
| "first": "Theresa", |
| "middle": [], |
| "last": "Wilson", |
| "suffix": "" |
| }, |
| { |
| "first": "Rebecca", |
| "middle": [], |
| "last": "Bruce", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Bell", |
| "suffix": "" |
| }, |
| { |
| "first": "Melanie", |
| "middle": [], |
| "last": "Martin", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Comput. Linguist", |
| "volume": "30", |
| "issue": "3", |
| "pages": "277--308", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Janyce Wiebe, Theresa Wilson, Rebecca Bruce, Matthew Bell, and Melanie Martin. 2004. Learning subjective language. Comput. Linguist., 30(3):277-308, Septem- ber.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "Basic generative model.", |
| "type_str": "figure", |
| "uris": null, |
| "num": null |
| }, |
| "FIGREF1": { |
| "text": "Perplexity of different model variants for different numbers of iterations at K=175.", |
| "type_str": "figure", |
| "uris": null, |
| "num": null |
| }, |
| "FIGREF2": { |
| "text": "Perplexity of different model variants.", |
| "type_str": "figure", |
| "uris": null, |
| "num": null |
| }, |
| "FIGREF3": { |
| "text": "Coherence of different model variants.", |
| "type_str": "figure", |
| "uris": null, |
| "num": null |
| }, |
| "FIGREF4": { |
| "text": "Top perspectives in system output for the topic of finance in the NEWS corpus (metaphors in red italics).", |
| "type_str": "figure", |
| "uris": null, |
| "num": null |
| }, |
| "FIGREF5": { |
| "text": "Contrasts identified by the model in NEWS.", |
| "type_str": "figure", |
| "uris": null, |
| "num": null |
| }, |
| "FIGREF6": { |
| "text": "Contrasts identified by the model in TWITTER.", |
| "type_str": "figure", |
| "uris": null, |
| "num": null |
| }, |
| "FIGREF7": { |
| "text": "\"Economy Improved\" response rate in conflicting stimulus conditions.", |
| "type_str": "figure", |
| "uris": null, |
| "num": null |
| } |
| } |
| } |
| } |