| { |
| "paper_id": "S15-1001", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:38:32.120840Z" |
| }, |
| "title": "Neural Networks for Integrating Compositional and Non-compositional Sentiment in Sentiment Composition", |
| "authors": [ |
| { |
| "first": "Xiaodan", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National Research Council", |
| "location": { |
| "addrLine": "1200 Montreal Road", |
| "postCode": "M50, K1A 0R6", |
| "settlement": "Ottawa", |
| "region": "ON", |
                    "country": "Canada"
| } |
| }, |
| "email": "xiaodan.zhu@nrc-cnrc.gc.ca" |
| }, |
| { |
| "first": "Hongyu", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National Research Council", |
| "location": { |
| "addrLine": "1200 Montreal Road", |
| "postCode": "M50, K1A 0R6", |
| "settlement": "Ottawa", |
| "region": "ON", |
                    "country": "Canada"
| } |
| }, |
| "email": "hongyu.guo@nrc-cnrc.gc.ca" |
| }, |
| { |
| "first": "Parinaz", |
| "middle": [], |
| "last": "Sobhani", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Ottawa", |
| "location": { |
| "addrLine": "800 King Edward Avenue Ottawa", |
| "postCode": "K1N 6N5", |
| "region": "ON", |
| "country": "Canada" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "This paper proposes neural networks for integrating compositional and non-compositional sentiment in the process of sentiment composition, a type of semantic composition that optimizes a sentiment objective. We enable individual composition operations in a recursive process to possess the capability of choosing and merging information from these two types of sources. We propose our models in neural network frameworks with structures, in which the merging parameters can be learned in a principled way to optimize a well-defined objective. We conduct experiments on the Stanford Sentiment Treebank and show that the proposed models achieve better results over the model that lacks this ability.", |
| "pdf_parse": { |
| "paper_id": "S15-1001", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "This paper proposes neural networks for integrating compositional and non-compositional sentiment in the process of sentiment composition, a type of semantic composition that optimizes a sentiment objective. We enable individual composition operations in a recursive process to possess the capability of choosing and merging information from these two types of sources. We propose our models in neural network frameworks with structures, in which the merging parameters can be learned in a principled way to optimize a well-defined objective. We conduct experiments on the Stanford Sentiment Treebank and show that the proposed models achieve better results over the model that lacks this ability.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Automatically determining the sentiment of a phrase, a sentence, or even a longer piece of text is still a challenging problem. Data sparseness encountered in such tasks often requires to factorize the problem to consider smaller pieces of component words or phrases, for which much research has been performed on bag-of-words or bag-of-phrases models (Pang and Lee, 2008; Liu and Zhang, 2012) . More recent work has started to model sentiment composition (Moilanen and Pulman, 2007; Choi and Cardie, 2008; Socher et al., 2012; Socher et al., 2013) , a type of semantic composition that optimizes a sentiment objective. In general, the composition process is critical in the formation of the sentiment of a span of text, which has not been well modeled yet and there is still scope for future work.", |
| "cite_spans": [ |
| { |
| "start": 352, |
| "end": 372, |
| "text": "(Pang and Lee, 2008;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 373, |
| "end": 393, |
| "text": "Liu and Zhang, 2012)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 456, |
| "end": 483, |
| "text": "(Moilanen and Pulman, 2007;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 484, |
| "end": 506, |
| "text": "Choi and Cardie, 2008;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 507, |
| "end": 527, |
| "text": "Socher et al., 2012;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 528, |
| "end": 548, |
| "text": "Socher et al., 2013)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Compositionality, or non-compositionality, of the senses of text spans is important for language understanding. Sentiment, as one of the major semantic differential categories (Osgood et al., 1957) , faces the problem as well. For example, the phrase must see or must try in a movie or restaurant review often indicates a positive sentiment, which, however, may be hard to learn from the component words. More extreme examples, e.g., slangs like bad ass, are not rare in social media text. This particular example can actually convey a very positive sentiment even though its component words are very negative. In brief, a sentiment composition framework that can consider both compositional and non-compositional sentiment is theoretically interesting.", |
| "cite_spans": [ |
| { |
| "start": 176, |
| "end": 197, |
| "text": "(Osgood et al., 1957)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "From a more pragmatical viewpoint, if one is able to reliably learn the sentiment of a text span (e.g., an ngram) holistically, it would be desirable that a composition model has the ability to decide the sources of knowledge it trusts more: the composition from the component words, the noncompositional source, or a soft combination of them. In such a situation, whether the text span is actually composable may be blur or may not be a concern.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In general, the composition of sentiment is a rather complicated process. As a glimpse of evidence, the effect of negation words on changing sentiment of their scopes appears to be a complicated function (Zhu et al., 2014) . The recently proposed neural networks (Socher et al., 2013; Socher et al., 2011) are promising, for their capability of modeling complicated functions (Mitchell, 1997) in 1 general, handling data sparseness by learning lowdimensional embeddings at each layer of composition, and providing a framework to optimize the composition process in principled way.", |
| "cite_spans": [ |
| { |
| "start": 204, |
| "end": 222, |
| "text": "(Zhu et al., 2014)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 263, |
| "end": 284, |
| "text": "(Socher et al., 2013;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 285, |
| "end": 305, |
| "text": "Socher et al., 2011)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 376, |
| "end": 392, |
| "text": "(Mitchell, 1997)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "This paper proposes neural networks for integrating compositional and non-compositional sentiment in the process of sentiment composition. To achieve this, we enable individual composition operations in a recursive process to possess the capability of choosing and merging information from these two types of sources. We propose our models in neural network frameworks with structures (Socher et al., 2013) , in which the merging parameters can be learned in a principled way to optimize a welldefined objective. We conduct experiments on the Stanford Sentiment Treebank and show that the proposed models achieve better results over the model that does not consider this property.", |
| "cite_spans": [ |
| { |
| "start": 385, |
| "end": 406, |
| "text": "(Socher et al., 2013)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Composition of sentiment Early work on modeling sentiment does not examine semantic composition closely (Pang and Lee, 2008; Liu and Zhang, 2012) , as mentioned above. Recent work has considered sentiment-oriented semantic composition (Moilanen and Pulman, 2007; Choi and Cardie, 2008; Socher et al., 2012; Socher et al., 2013) , or simply called sentiment composition in this paper. For example, Moilanen and Pulman (2007) used a collection of handwritten compositional rules to assign sentiment values to different granularities of text spans. Choi and Cardie (2008) proposed a learning-based framework. The more recent work of (Socher et al., 2013) proposed models based on neural networks that do not rely on any heuristic rules. Such models work in a bottom-up fashion over a tree to infer the sentiment label of a phrase or sentence as a composition of the sentiment expressed by its constituting parts. The approach leverages a principled method, the forward and backward propagation, to optimize the system performance. In this paper, we follow the neural network approach to integrate compositional and non-compositional sentiment in sentiment composition.", |
| "cite_spans": [ |
| { |
| "start": 104, |
| "end": 124, |
| "text": "(Pang and Lee, 2008;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 125, |
| "end": 145, |
| "text": "Liu and Zhang, 2012)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 235, |
| "end": 262, |
| "text": "(Moilanen and Pulman, 2007;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 263, |
| "end": 285, |
| "text": "Choi and Cardie, 2008;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 286, |
| "end": 306, |
| "text": "Socher et al., 2012;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 307, |
| "end": 327, |
| "text": "Socher et al., 2013)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 546, |
| "end": 568, |
| "text": "Choi and Cardie (2008)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 630, |
| "end": 651, |
| "text": "(Socher et al., 2013)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Prior knowledge of sentiment Integrating noncompositional sentiment into the composition pro-cess can be viewed as introducing some prior sentiment knowledge, as in general the sentiment of a word or a phrase perceived independent of its context is often referred to as prior sentiment. Wordlevel prior sentiment is typically annotated in manual sentiment lexicons (Wilson et al., 2005; Hu and Liu, 2004; Mohammad and Turney, 2010) , or learned in an unsupervised or semisupervised way (Hatzivassiloglou and McKeown, 1997; Esuli and Sebastiani, 2006; Turney and Littman, 2003; Mohammad et al., 2009) . More recently, sentiment indicators, such as emoticons and hashtags, are utilized (Go et al., 2009; Davidov et al., 2010; Kouloumpis et al., 2011; Mohammad, 2012; Mohammad et al., 2013a) . With enough data, such freely available (but noisy) annotation can be used to learn the sentiment of ngrams. In our study, we will investigate in the proposed composition models the effect of automatically learned sentimental ngrams.", |
| "cite_spans": [ |
| { |
| "start": 365, |
| "end": 386, |
| "text": "(Wilson et al., 2005;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 387, |
| "end": 404, |
| "text": "Hu and Liu, 2004;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 405, |
| "end": 431, |
| "text": "Mohammad and Turney, 2010)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 486, |
| "end": 522, |
| "text": "(Hatzivassiloglou and McKeown, 1997;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 523, |
| "end": 550, |
| "text": "Esuli and Sebastiani, 2006;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 551, |
| "end": 576, |
| "text": "Turney and Littman, 2003;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 577, |
| "end": 599, |
| "text": "Mohammad et al., 2009)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 684, |
| "end": 701, |
| "text": "(Go et al., 2009;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 702, |
| "end": 723, |
| "text": "Davidov et al., 2010;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 724, |
| "end": 748, |
| "text": "Kouloumpis et al., 2011;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 749, |
| "end": 764, |
| "text": "Mohammad, 2012;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 765, |
| "end": 788, |
| "text": "Mohammad et al., 2013a)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In this paper, we propose several neural networks that enable each composition operation to possess the ability of choosing and merging sentiment from lower-level composition and that from non-compositional sources. We call the networks Prior-Enriched Semantic Networks (PESN). We present several specific implementations based on RNTN (Socher et al., 2013) ; the latter has showed to be a state-of-the-art sentiment composition framework. However, the realization of a PESN node is not necessarily only tied with RNTN. Figure 1 shows a piece of PESN. Each of the three big nodes, i.e., N 1 , N 2 , and N 3 , corresponds to a node in a constituency parse tree; e.g., N 3 may correspond to the phrase not a must try, where N 1 and N 2 are not and a must try, respectively. We extend each of the nodes to possess the ability to consider sentiment from lower-level composition and non-compositional sources. In node N 3 , knowledge from the lower-level composition is represented in the hidden vector i 3 , which is merged with noncompositional knowledge represented in e 3 , and the merged information is saved in m 3 . The black box in the center performs the actual merging, which integrates the two knowledge sources in order to min-imize an overall objective function that we will discuss in detail later. The recursive neural networks and the forward-backward propagation over structures (Socher et al., 2013; Goller and Kchler, 1996) provide a principled way to optimize the whole network. Figure 1 : A prior-enriched semantic network (PESN) for sentiment composition. The three nodes, N 1 , N 2 , and N 3 , correspond to three nodes in a constituency parse tree, and each of them consider sentiment from lowerlevel composition (i 1 , i 2 , i 3 ) and from non-compositional sentiment (e 1 , e 2 , e 3 ).", |
| "cite_spans": [ |
| { |
| "start": 336, |
| "end": 357, |
| "text": "(Socher et al., 2013)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 1391, |
| "end": 1412, |
| "text": "(Socher et al., 2013;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 1413, |
| "end": 1437, |
| "text": "Goller and Kchler, 1996)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 520, |
| "end": 528, |
| "text": "Figure 1", |
| "ref_id": null |
| }, |
| { |
| "start": 1494, |
| "end": 1502, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Prior-enriched semantic networks", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The most straightforward way of implementing a PESN node is probably through a regular bilinear merging. Take node N 3 in Figure 1 as an example; the node vector m 3 will be simply merged from i 3 and e 3 as follows:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 122, |
| "end": 130, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Regular bilinear merging", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "m 3 = tanh(W m i 3 e 3 + b m )", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Regular bilinear merging", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Again, vector i 3 contains the knowledge from the lower-level composition; e 3 is a vector representing non-compositional sentiment information, which can be either from human annotation or automatically learned resources. Note that in the network, all hidden vectors m and i (including word embedding vectors) have the same dimensionality d, but the non-compositional nodes, i.e., the nodes e , do not necessarily have to have the same number of elements, and we let l be their dimensionality. The merging matrix W m is d-by-(d+l).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Regular bilinear merging", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "As in this paper we discuss PESN in the framework of RNTN, computation outside the nodes N 1 , N 2 , N 3 follows that for the standard three-way tensors in RNTN. That is, the hidden vector i 3 is computed with the following formula:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Regular bilinear merging", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "i 3 = tanh( m 1 m 2 T V [1:d] r m 1 m 2 + W r m 1 m 2 ) (2)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Regular bilinear merging", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where, W r \u2208 R d\u00d7(d+d) and V r \u2208 R (d+d)\u00d7(d+d)\u00d7d are the matrix and tensor of the composition function used in RNTN, respectively, each of which is shared over the whole tree in computing vectors i 1 , i 2 , and i 3 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Regular bilinear merging", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Compared to the regular bilinear merging model, we here further explicitly control the input of the compositional and non-compositional semantics. Explicitly gating neural network has been studied in the literature. For example, the long short-term memory (LSTM) utilizes input gates, together with output gates and forget gates, to guide memory blocks to remember/forget history (Hochreiter and Schmidhuber, 1997) . For our purpose here, we explore an input gate to explicitly control the two different input sources. As shown in Figure 2 , an additional gating layer g 3 is used to control i 3 , e 3 explicitly.", |
| "cite_spans": [ |
| { |
| "start": 380, |
| "end": 414, |
| "text": "(Hochreiter and Schmidhuber, 1997)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 531, |
| "end": 539, |
| "text": "Figure 2", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Explicitly gated merging", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "g 3 = \u03c3( \uf8ee \uf8f0 W ge e 3 W g i i 3 \uf8f9 \uf8fb + b g )", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Explicitly gated merging", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "m 3 = tanh(W m (g 3 \u2297 i 3 e 3 ) + b m )", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "Explicitly gated merging", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The sign \u2297 is a Hadamard product; \u03c3 is a logistic sigmoid function instead of a tanh activation, which makes the gating signal g 3 to be in the range of [0, 1] and serve as a soft switch (not a hard binary 0/1 switch) to explicitly gate i 3 and e 3 . Note that elsewhere in the network, we still use tanh as our activation function. In addition, W ge \u2208 R d\u00d7l and W g i \u2208 R l\u00d7d are the weight matrices used to calculate the gate vector.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Explicitly gated merging", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The third approach we use for merging compositional and non-compositional knowledge employs tensors, which are able to explore multiplicative combination among variables. Tensors have already been successfully used in a wide range of NLP tasks in capturing high-order interactions among variables. The forward computation of m 3 follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Confined-tensor-based merging", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "m 3 = tanh( i 3 e 3 T V [1:d] m i 3 e 3 + W m i 3 e 3 )", |
| "eq_num": "(5)" |
| } |
| ], |
| "section": "Confined-tensor-based merging", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "where V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Confined-tensor-based merging", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "[1:d] m \u2208 R (d+l)\u00d7(d+l)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Confined-tensor-based merging", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "\u00d7d is the tensor m that defines multiple bilinear forms, and the matrix W m is as defined in the previous models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Confined-tensor-based merging", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "As we focus on the interaction between i 3 and e 3 , we force each slice of tensor, e.g. V m (k \u2208 {1...d}) and the bottom-left l-by-d block are non-zero parameters, used to capture multiplicative, element-pair interactions between i 3 and e 3 , while the rest block are set to be zero, to ignore interactions between those variables within i 3 and those within e 3 . This does not only make the model focus on the interaction between vector i and e, it also helps significantly reduce the number of parameters to estimate, which, otherwise, could potentially lead to overfitting. We call this model confined-tensor-based merging.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Confined-tensor-based merging", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Objective The overall objective function in learning PESN, following (Socher et al., 2013) , minimizes the cross-entropy error between the predicted distribution y sen i \u2208 R c\u00d71 at a node i and the target distribution t i \u2208 R c\u00d71 at that node, where c is the number of sentiment categories. PESN learns the parameters that are used to merge the compositional and non-compositional sentiment so that the merging operations integrate the two sources in minimizing prediction loss. The neural network over structures provides a principled framework to optimize these parameters.", |
| "cite_spans": [ |
| { |
| "start": 69, |
| "end": 90, |
| "text": "(Socher et al., 2013)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning and inference", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "More specifically, the error over an entire sentence is calculated as a regularized sum:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning and inference", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "E(\u03b8) = i j t i j logy sen i j + \u03bb \u03b8 2 (6)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning and inference", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "where, \u03bb is the regularization parameter, j \u2208 c denotes the j-th element of the multinomial target distribution, \u03b8 are model parameters that will be discussed below, and i iterates over all nodes i x (e.g., i 1 , i 2 , and i 3 ) in Figure 1 , where the model predicts sentiment labels.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 232, |
| "end": 240, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Learning and inference", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "Backpropagation over the structures To minimize E(\u03b8), the gradient of the objective function with respect to each of the parameters in \u03b8 is calculated efficiently via backpropagation through structure (Socher et al., 2013; Goller and Kchler, 1996) , after computing the prediction errors in forward propagation with formulas described above.", |
| "cite_spans": [ |
| { |
| "start": 201, |
| "end": 222, |
| "text": "(Socher et al., 2013;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 223, |
| "end": 247, |
| "text": "Goller and Kchler, 1996)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning and inference", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "Regular bilinear merging The PESN implemented with simple bilinear merging has the following model parameters:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning and inference", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "\u03b8 = (V r , W r , W m , W label , L).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning and inference", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "As discussed above, V r and W r are the tensor and matrix in RNTN; W m is the weight matrix for merging the compositional and non-compositional sentiment vectors. L denotes the vector representations of the word dictionary, and W label is sentiment classification matrix used to predict sentiment label at a node. Backpropagation on the regular bilinear merging node follows a standard derivative computation in a regular feed-forward network, which we skip here.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning and inference", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "Explicitly gated merging In this model, in addition to W m , we further learn two weight matrices W g i and W ge , as introduced in Formula 3 and 4 above. Consider Figure 2 and let \u03b4 m 3 denote the error messages passed down to node m 3 . The error messages are passed back to i 3 directly through the Hadamard product and also through the gate node g 3 . The former, denoted as \u03b4 i 3 ,dir , is calculated with:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 164, |
| "end": 172, |
| "text": "Figure 2", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Learning and inference", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u03b4 i 3 ,dir = (\u03b4 m 3 \u2297 g 3 )[1 : d]", |
| "eq_num": "(7)" |
| } |
| ], |
| "section": "Learning and inference", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "where, g 3 is calculated with Formula 3 above in the forward process; [1 : d] means taking the first d elements of the vector yielded by the Hadamard product; the rest [d + 1 : d + l] elements of the Hadamard production are discarded, as we do not update e 3 , which is given as our prior knowledge. The error messages passed down to gate vector g 3 is computed with", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning and inference", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u03b4 g 3 = \u03b4 m 3 \u2297 i 3 e 3 \u2297 s (g 3 )", |
| "eq_num": "(8)" |
| } |
| ], |
| "section": "Learning and inference", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "where, s (.) is the element-wise derivative of logistic function, which can be calculated only using s(.), as s(.)(1 \u2212 s(.)). The derivative of W g i can be calculated with:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning and inference", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u2202E g 3 W ge = (\u03b4 g 3 [1 : d])e T 3", |
| "eq_num": "(9)" |
| } |
| ], |
| "section": "Learning and inference", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "Similarly, partial derivatives over W g i can be calculated. These values will be summed to the total derivative of W g i and W ge , respectively. With these notations, the error messages passed down to i 3 through the gate can then be computed with:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning and inference", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u03b4 i 3 ,gate = W T g i (\u03b4 g 3 [d + 1 : d + l])", |
| "eq_num": "(10)" |
| } |
| ], |
| "section": "Learning and inference", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "and the total error messages to node i 3 is then:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning and inference", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "\u03b4 i 3 ,total = (\u03b4 i 3 ,dir +\u03b4 i 3 ,gate +\u03b4 i 3 ,local )\u2297f (i 3 ) (11)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning and inference", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "where \u03b4 i 3 ,local is the local error message from the sentiment prediction errors performed at the node i 3 itself to obtain the total error message for i 3 , which is in turn passed down through regular RNTN tensor to the lower levels. f (.) is the element-wise derivative of tanh function.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning and inference", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "Confined-tensor-based merging In confined-tensorbased merging, the error messages passed to the two children i 3 and e 3 is computed with:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning and inference", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "\u03b4 i 3 ,e 3 = (W T m \u03b4 m 3 ) \u2297 f ( i 3 e 3 ) + \u03b4 tns (12)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning and inference", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "where,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning and inference", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u03b4 tns = d k=1 \u03b4 m3 k (V [k] m + (V [k] m ) T ) i 3 e 3 \u2297 f ( i 3 e 3 )", |
| "eq_num": "(13)" |
| } |
| ], |
| "section": "Learning and inference", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "where the error messages to i 3 are the first d numbers of elements of \u03b4 i 3 ,e 3 . The rest elements of \u03b4 i 3 ,e 3 are discarded; as mentioned above, we do not update e 3 as it is given as the prior knowledge. We skip the derivative for the W m 3 . While the derivative of each slice k(k = 1, . . . , d) of the tensor V is calculated with:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning and inference", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u2202E m 3 V [k] m = \u03b4 m 3 ,down k i 3 e 3 i 3 e 3 T", |
| "eq_num": "(14)" |
| } |
| ], |
| "section": "Learning and inference", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "Again, the full derivative for V m and W m is the sum of their derivatives over the trees. After the error message passing from m 3 to i 3 is obtained, it can be summed up with the local error message from the sentiment prediction errors at the node i 3 itself to obtain the total error message for i 3 , which is in turn used to calculate the error messages passed down as well as the derivative in the lower-level tree.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning and inference", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "We use the Stanford Sentiment Treebank (Socher et al., 2013) in our experiments. The data contain about 11,800 sentences from the movie reviews that were originally collected by Pang and Lee (2005) .", |
| "cite_spans": [ |
| { |
| "start": 39, |
| "end": 60, |
| "text": "(Socher et al., 2013)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 178, |
| "end": 197, |
| "text": "Pang and Lee (2005)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "The sentences were parsed with the Stanford parser (Klein and Manning, 2003) . Phrases at all the tree nodes were manually annotated with sentiment values. We use the same split of the training and test data as in (Socher et al., 2013) to predict the sentiment categories of the roots (sentences) and the phrases, and use the same evaluation metric, classification accuracy, to measure the performances.", |
| "cite_spans": [ |
| { |
| "start": 51, |
| "end": 76, |
| "text": "(Klein and Manning, 2003)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 214, |
| "end": 235, |
| "text": "(Socher et al., 2013)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "In our experiments, we explore in sentiment composition the effect of two different types of noncompositional sentiment: (1) sentiment of ngrams automatically learned from an external, much larger corpus, and (2) sentiment of ngrams assigned by human annotators.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Obtaining non-compositional sentiment", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Following the method proposed in (Mohammad et al., 2013b) , we learn sentimental ngrams from Tweets. The unsupervised approach utilizes hashtags, which can be regarded as conveying freely available (but noisy) human annotation of sentiment. More specifically, certain words in tweets are specially marked with the hash character (#) to indicate the topic, sentiment polarity, or emotions such as joy, sadness, anger, and surprise. With enough data, such artificial annotation can be used to learn the sentiment of ngrams by their likelihood of cooccurring with such hashtagged words.", |
| "cite_spans": [ |
| { |
| "start": 33, |
| "end": 57, |
| "text": "(Mohammad et al., 2013b)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Obtaining non-compositional sentiment", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "More specifically, a collection of 78 seed hashtags closely related to positive and negative, such as #good, #excellent, #bad, and #terrible, were used (32 positive and 36 negative). These terms were chosen from entries for positive and negative in the Roget's Thesaurus. A set of 775,000 tweets that contain at least a positive hashtag or a negative hashtag were used as the learning corpus. A tweet was considered positive if it had one of the 32 positive seed hashtags, and negative if it had one of the 36 negative seed hashtags. The association score for an ngram w was calculated from these pseudo-labeled tweets as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Obtaining non-compositional sentiment", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "score(w) = P M I(w, positive) \u2212 P M I(w, negative)", |
| "eq_num": "(15)" |
| } |
| ], |
| "section": "Obtaining non-compositional sentiment", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "where PMI stands for pointwise mutual information, and the two terms in the formula calculate the PMI between the target ngram and the pseudo-labeled positive tweets as well as that between the ngram and the negative tweets, respectively. Accordingly, a positive score(.) indicates association with positive sentiment, whereas a negative score indicates association with negative sentiment. We use in our experiments the bigrams and trigrams learned from the dataset with the occurrences higher than 5. We assign these ngrams into one of the 5 bins according to their sentiment scores obtained with Formula 15: (\u2212\u221e, \u22122], (\u22122, \u22121], (\u22121, 1), [1, 2), and [2, +\u221e) . Each ngram is now given a one-hot vector, indicating the polarity and strength of its sentiment. For example, a bigram with a score of -1.5 will be assigned a 5-dimensional vector [0, 1, 0, 0, 0], indicating a weak negative. Note that PESN can also take into other forms of sentiment embeddings, such as those learned in (Tang et al., 2014) .", |
| "cite_spans": [ |
| { |
| "start": 640, |
| "end": 659, |
| "text": "[1, 2), and [2, +\u221e)", |
| "ref_id": null |
| }, |
| { |
| "start": 983, |
| "end": 1002, |
| "text": "(Tang et al., 2014)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Obtaining non-compositional sentiment", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "In addition, the Stanford Sentiment Treebank contains manually annotated sentiment for each individual phrase in a parse tree, so we use such annotation but not other manual lexicons, by assuming such annotation fits the corpus itself the best. Specifically, we use bigram and trigram annotation in the treebank. Note that although even longer ngrams are much sparser and probably less useful in general, one may learn sentiment for multi-word expressions of a larger length, which we will leave as future work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Obtaining non-compositional sentiment", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Overall prediction performance Table 1 shows the accuracies of different models on Stanford Sentiment Treebank. We evaluate the models on 5category sentiment prediction at both the sentence (root) level and at all nodes (including roots). 1 The results reported in Table 1 RNTN models with the default parameter 4 and run the training from 5 different random initializations, and report the best results we observed. The rows in the table marked with auto are models using the automatically learned ngrams, and those marked with manu using manually annotated sentiment for bigrams and trigrams. Note that the noncompositional sentiment of a node is only used to predict the sentiment of phrases above it in the tree. For example, in Figure 1 discussed earlier, the effect of e 1 and e 2 will be used to predict the sentiment of i 3 and other node i above, but not that of i 1 and i 2 themselves, avoiding the concern of using the annotation of a tree node to predict the sentiment of itself.", |
| "cite_spans": [ |
| { |
| "start": 239, |
| "end": 240, |
| "text": "1", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 31, |
| "end": 38, |
| "text": "Table 1", |
| "ref_id": null |
| }, |
| { |
| "start": 265, |
| "end": 272, |
| "text": "Table 1", |
| "ref_id": null |
| }, |
| { |
| "start": 733, |
| "end": 741, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "The models in general benefit from incorporating the non-compositional knowledge. The numbers in the bold font are the best performance achieved on the two tasks. While using the simple regular bilinear merging shows some gains, the more complicated models achieve further improvement.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Above we have seen the general performance of the models. Below, we take a closer look at the prediction errors at different depths of the sentiment treebank. The depth here is defined as the longest distance between a tree node and its descendant leafs. In Figure 3 , the x-axis corresponds to different depths and y-axis is the accuracy. The figure was drawn with the RNTN and the model (7) in Table 1 , so as to study the compositional property in the ideal situation where the lexicon has a full coverage of bigrams and trigrams. The figure shows that using the confined tensor to combine holistic sentiment information outperforms the original RNTN model that does not consider this, starting from depth 3, showing the benefit of using holistic bigram sentiment. The improvement increases at depth 4 (indicating the benefit of using trigram sentiment), and then was propagated to the higher levels of the tree. As discussed above, we only use non-compositional sentiment of a node to predict the sentiment of the phrases above it in the tree but not the node itself. And the system still needs to balance which source it trusts more, by optimizing the overall objective.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 258, |
| "end": 266, |
| "text": "Figure 3", |
| "ref_id": "FIGREF4" |
| }, |
| { |
| "start": 396, |
| "end": 403, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Although the empirical improvement may depend on the percentage of non-compositional instances in a data set or the sentiment that needs to be learned holistically, we present here the first effort, according to our knowledge, on studying the concern of integrating compositional and non-compositional sentiment in the semantic composition process.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "This paper proposes models for integrating compositional and non-compositional sentiment in the process of sentiment composition. To achieve this, we enable each composition operation to be able to choose and merge information from these two types of sources. We propose to implement such models within neural network frameworks with structures (Socher et al., 2013) , in which the merging parameters can be optimized in a principled way, to minimize a well-defined objective. We conduct experiments on the Stanford Sentiment Treebank and show that the proposed models achieve better results over the model that does not consider this property.", |
| "cite_spans": [ |
| { |
| "start": 345, |
| "end": 366, |
| "text": "(Socher et al., 2013)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions and future work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Although the empirical improvement may depend on the percentage of non-compositional instances in a data set or the sentiment that needs to be learned holistically, we present here the first effort, according to our knowledge, on studying the basic concern of integrating compositional and non-compositional sentiment in composition. While we focus on sentiment in this paper, investigating compositional and non-compositional semantics for general semantic composition with neural networks is interesting to us as an immediate future problem, as such models provide a principled way to optimize the overall objective over the sentence structures when we consider both compositional and non-compositional semantics.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions and future work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "The package only gives approximate accuracies for 2-category sentiment, which are not included here in the table. 2 http://nlp.stanford.edu/sentiment/code.html 3 The matlab code used in (Socher et al., 2013) is not published.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Learning with compositional semantics as structural inference for subsentential sentiment analysis", |
| "authors": [ |
| { |
| "first": "Yejin", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "" |
| }, |
| { |
| "first": "Claire", |
| "middle": [], |
| "last": "Cardie", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing, EMNLP '08", |
| "volume": "", |
| "issue": "", |
| "pages": "793--801", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yejin Choi and Claire Cardie. 2008. Learning with com- positional semantics as structural inference for subsen- tential sentiment analysis. In Proceedings of the Con- ference on Empirical Methods in Natural Language Processing, EMNLP '08, pages 793-801, Honolulu, Hawaii.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Enhanced sentiment learning using Twitter hashtags and smileys", |
| "authors": [ |
| { |
| "first": "Dmitry", |
| "middle": [], |
| "last": "Davidov", |
| "suffix": "" |
| }, |
| { |
| "first": "Oren", |
| "middle": [], |
| "last": "Tsur", |
| "suffix": "" |
| }, |
| { |
| "first": "Ari", |
| "middle": [], |
| "last": "Rappoport", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 23rd International Conference on Computational Linguistics: Posters, COLING '10", |
| "volume": "", |
| "issue": "", |
| "pages": "241--249", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dmitry Davidov, Oren Tsur, and Ari Rappoport. 2010. Enhanced sentiment learning using Twitter hashtags and smileys. In Proceedings of the 23rd International Conference on Computational Linguistics: Posters, COLING '10, pages 241-249, Beijing, China.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "SENTI-WORDNET: A publicly available lexical resource for opinion mining", |
| "authors": [ |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Esuli", |
| "suffix": "" |
| }, |
| { |
| "first": "Fabrizio", |
| "middle": [], |
| "last": "Sebastiani", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the 5th Conference on Language Resources and Evaluation, LREC '06", |
| "volume": "", |
| "issue": "", |
| "pages": "417--422", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrea Esuli and Fabrizio Sebastiani. 2006. SENTI- WORDNET: A publicly available lexical resource for opinion mining. In In Proceedings of the 5th Confer- ence on Language Resources and Evaluation, LREC '06, pages 417-422.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Twitter sentiment classification using distant supervision", |
| "authors": [ |
| { |
| "first": "Alec", |
| "middle": [], |
| "last": "Go", |
| "suffix": "" |
| }, |
| { |
| "first": "Richa", |
| "middle": [], |
| "last": "Bhayani", |
| "suffix": "" |
| }, |
| { |
| "first": "Lei", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alec Go, Richa Bhayani, and Lei Huang. 2009. Twit- ter sentiment classification using distant supervision. Technical report, Stanford University.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Learning task-dependent distributed representations by backpropagation through structure", |
| "authors": [ |
| { |
| "first": "Christoph", |
| "middle": [], |
| "last": "Goller", |
| "suffix": "" |
| }, |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Kchler", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "Proc. of the ICNN-96", |
| "volume": "", |
| "issue": "", |
| "pages": "347--352", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christoph Goller and Andreas Kchler. 1996. Learning task-dependent distributed representations by back- propagation through structure. In In Proc. of the ICNN-96, pages 347-352, Bochum, Germany. IEEE.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Predicting the semantic orientation of adjectives", |
| "authors": [ |
| { |
| "first": "Vasileios", |
| "middle": [], |
| "last": "Hatzivassiloglou", |
| "suffix": "" |
| }, |
| { |
| "first": "Kathleen", |
| "middle": [ |
| "R" |
| ], |
| "last": "Mckeown", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Proceedings of the 8th Conference of European Chapter of the Association for Computational Linguistics, EACL '97", |
| "volume": "", |
| "issue": "", |
| "pages": "174--181", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vasileios Hatzivassiloglou and Kathleen R. McKeown. 1997. Predicting the semantic orientation of ad- jectives. In Proceedings of the 8th Conference of European Chapter of the Association for Computa- tional Linguistics, EACL '97, pages 174-181, Madrid, Spain.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Long shortterm memory", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Hochreiter", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Schmidhuber", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Neural Computation", |
| "volume": "9", |
| "issue": "8", |
| "pages": "1735--1780", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "S. Hochreiter and J. Schmidhuber. 1997. Long short- term memory. Neural Computation, 9(8):1735-1780.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Mining and summarizing customer reviews", |
| "authors": [ |
| { |
| "first": "Minqing", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the 10th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, KDD '04", |
| "volume": "", |
| "issue": "", |
| "pages": "168--177", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Minqing Hu and Bing Liu. 2004. Mining and summa- rizing customer reviews. In Proceedings of the 10th ACM SIGKDD International Conference on Knowl- edge Discovery and Data Mining, KDD '04, pages 168-177, New York, NY, USA. ACM.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Accurate unlexicalized parsing", |
| "authors": [ |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Christopher", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of the 41st Annual Meeting on Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "423--430", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dan Klein and Christopher D. Manning. 2003. Accurate unlexicalized parsing. In Proceedings of the 41st An- nual Meeting on Association for Computational Lin- guistics -Volume 1, ACL '03, pages 423-430, Sap- poro, Japan. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Twitter sentiment analysis: The Good the Bad and the OMG!", |
| "authors": [ |
| { |
| "first": "Efthymios", |
| "middle": [], |
| "last": "Kouloumpis", |
| "suffix": "" |
| }, |
| { |
| "first": "Theresa", |
| "middle": [], |
| "last": "Wilson", |
| "suffix": "" |
| }, |
| { |
| "first": "Johanna", |
| "middle": [], |
| "last": "Moore", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 5th International AAAI Conference on Weblogs and Social Media", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Efthymios Kouloumpis, Theresa Wilson, and Johanna Moore. 2011. Twitter sentiment analysis: The Good the Bad and the OMG! In Proceedings of the 5th In- ternational AAAI Conference on Weblogs and Social Media.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "A survey of opinion mining and sentiment analysis", |
| "authors": [ |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Lei", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Mining Text Data", |
| "volume": "", |
| "issue": "", |
| "pages": "415--463", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bing Liu and Lei Zhang. 2012. A survey of opinion mining and sentiment analysis. In Charu C. Aggar- wal and ChengXiang Zhai, editors, Mining Text Data, pages 415-463. Springer US.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Machine learning", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Tom", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mitchell", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tom M Mitchell. 1997. Machine learning. 1997. Burr Ridge, IL: McGraw Hill, 45.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Emotions evoked by common words and phrases: Using Mechanical Turk to create an emotion lexicon", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Saif", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mohammad", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Peter", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Turney", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the NAACL-HLT Workshop on Computational Approaches to Analysis and Generation of Emotion in Text", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Saif M. Mohammad and Peter D. Turney. 2010. Emo- tions evoked by common words and phrases: Using Mechanical Turk to create an emotion lexicon. In Pro- ceedings of the NAACL-HLT Workshop on Computa- tional Approaches to Analysis and Generation of Emo- tion in Text, LA, California.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Generating high-coverage semantic orientation lexicons from overtly marked words and a thesaurus", |
| "authors": [ |
| { |
| "first": "Saif", |
| "middle": [], |
| "last": "Mohammad", |
| "suffix": "" |
| }, |
| { |
| "first": "Cody", |
| "middle": [], |
| "last": "Dunne", |
| "suffix": "" |
| }, |
| { |
| "first": "Bonnie", |
| "middle": [], |
| "last": "Dorr", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing", |
| "volume": "2", |
| "issue": "", |
| "pages": "599--608", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Saif Mohammad, Cody Dunne, and Bonnie Dorr. 2009. Generating high-coverage semantic orientation lexi- cons from overtly marked words and a thesaurus. In Proceedings of the Conference on Empirical Methods in Natural Language Processing: Volume 2, EMNLP '09, pages 599-608, Singapore.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "NRC-Canada: Building the state-of-the-art in sentiment analysis of tweets", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Mohammad", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Kiritchenko", |
| "suffix": "" |
| }, |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the International Workshop on Semantic Evaluation, SemEval '13", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "S. Mohammad, S. Kiritchenko, and X. Zhu. 2013a. NRC-Canada: Building the state-of-the-art in senti- ment analysis of tweets. In Proceedings of the Inter- national Workshop on Semantic Evaluation, SemEval '13, Atlanta, Georgia, USA, June.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "#emotional tweets", |
| "authors": [ |
| { |
| "first": "Saif", |
| "middle": [], |
| "last": "Mohammad", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the First Joint Conference on Lexical and Computational Semantics, *SEM '12", |
| "volume": "", |
| "issue": "", |
| "pages": "246--255", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Saif Mohammad. 2012. #emotional tweets. In Pro- ceedings of the First Joint Conference on Lexical and Computational Semantics, *SEM '12, pages 246- 255, Montr\u00e9al, Canada. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Sentiment composition", |
| "authors": [ |
| { |
| "first": "Karo", |
| "middle": [], |
| "last": "Moilanen", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Pulman", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of RANLP 2007", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Karo Moilanen and Stephen Pulman. 2007. Senti- ment composition. In Proceedings of RANLP 2007, Borovets, Bulgaria.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "The measurement of meaning", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Charles", |
| "suffix": "" |
| }, |
| { |
| "first": "George", |
| "middle": [ |
| "J" |
| ], |
| "last": "Osgood", |
| "suffix": "" |
| }, |
| { |
| "first": "Percy", |
| "middle": [], |
| "last": "Suci", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Tannenbaum", |
| "suffix": "" |
| } |
| ], |
| "year": 1957, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Charles E Osgood, George J Suci, and Percy Tannen- baum. 1957. The measurement of meaning. Univer- sity of Illinois Press.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Seeing stars: Exploiting class relationships for sentiment categorization with respect to rating scales", |
| "authors": [ |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Pang", |
| "suffix": "" |
| }, |
| { |
| "first": "Lillian", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the Annual Meeting of the Association for Computational Linguistics, ACL '05", |
| "volume": "", |
| "issue": "", |
| "pages": "115--124", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bo Pang and Lillian Lee. 2005. Seeing stars: Exploiting class relationships for sentiment categorization with respect to rating scales. In Proceedings of the Annual Meeting of the Association for Computational Linguis- tics, ACL '05, pages 115-124.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Opinion mining and sentiment analysis. Foundations and Trends in Information Retrieval", |
| "authors": [ |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Pang", |
| "suffix": "" |
| }, |
| { |
| "first": "Lillian", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "", |
| "volume": "2", |
| "issue": "", |
| "pages": "1--135", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bo Pang and Lillian Lee. 2008. Opinion mining and sentiment analysis. Foundations and Trends in Infor- mation Retrieval, 2(1-2):1-135.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Semi-supervised recursive autoencoders for predicting sentiment distributions", |
| "authors": [ |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Pennington", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Richard Socher, Jeffrey Pennington, Eric Huang, An- drew Y. Ng, and Christopher D. Manning. 2011. Semi-supervised recursive autoencoders for predicting sentiment distributions. In Conference on Empirical Methods in Natural Language Processing.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Semantic compositionality through recursive matrix-vector spaces", |
| "authors": [ |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Brody", |
| "middle": [], |
| "last": "Huval", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing", |
| "volume": "12", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Richard Socher, Brody Huval, Christopher D. Manning, and Andrew Y. Ng. 2012. Semantic compositionality through recursive matrix-vector spaces. In Proceed- ings of the Conference on Empirical Methods in Nat- ural Language Processing, EMNLP '12, Jeju, Korea. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Recursive deep models for semantic compositionality over a sentiment treebank", |
| "authors": [ |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Perelygin", |
| "suffix": "" |
| }, |
| { |
| "first": "Jean", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Chuang", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Potts", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing, EMNLP '13", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Richard Socher, Alex Perelygin, Jean Y. Wu, Jason Chuang, Christopher D. Manning, Andrew Y. Ng, and Christopher Potts. 2013. Recursive deep models for semantic compositionality over a sentiment treebank. In Proceedings of the Conference on Empirical Meth- ods in Natural Language Processing, EMNLP '13, Seattle, USA. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Learning sentiment-specific word embedding for twitter sentiment classification", |
| "authors": [ |
| { |
| "first": "Duyu", |
| "middle": [], |
| "last": "Tang", |
| "suffix": "" |
| }, |
| { |
| "first": "Furu", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Nan", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Ting", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "Qin", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Duyu Tang, Furu Wei, Nan Yang, Ming Zhou, Ting Liu, and Bing Qin. 2014. Learning sentiment-specific word embedding for twitter sentiment classification. In Proceedings of ACL, Baltimore, Maryland, USA, June.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Measuring praise and criticism: Inference of semantic orientation from association", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Turney", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Michael L Littman", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "ACM Transactions on Information Systems", |
| "volume": "21", |
| "issue": "4", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Turney and Michael L Littman. 2003. Measuring praise and criticism: Inference of semantic orientation from association. ACM Transactions on Information Systems, 21(4).", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Recognizing contextual polarity in phrase-level sentiment analysis", |
| "authors": [ |
| { |
| "first": "Theresa", |
| "middle": [], |
| "last": "Wilson", |
| "suffix": "" |
| }, |
| { |
| "first": "Janyce", |
| "middle": [], |
| "last": "Wiebe", |
| "suffix": "" |
| }, |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Hoffmann", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the Conference on Human Language Technology and Empirical Methods in Natural Language Processing, HLT '05", |
| "volume": "", |
| "issue": "", |
| "pages": "347--354", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Theresa Wilson, Janyce Wiebe, and Paul Hoffmann. 2005. Recognizing contextual polarity in phrase-level sentiment analysis. In Proceedings of the Confer- ence on Human Language Technology and Empirical Methods in Natural Language Processing, HLT '05, pages 347-354, Stroudsburg, PA, USA. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "An empirical study on the effect of negation words on sentiment", |
| "authors": [ |
| { |
| "first": "Xiaodan", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Hongyu", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| }, |
| { |
| "first": "Saif", |
| "middle": [], |
| "last": "Mohammad", |
| "suffix": "" |
| }, |
| { |
| "first": "Svetlana", |
| "middle": [], |
| "last": "Kiritchenko", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiaodan Zhu, Hongyu Guo, Saif Mohammad, and Svet- lana Kiritchenko. 2014. An empirical study on the effect of negation words on sentiment. In Proceedings of ACL, Baltimore, Maryland, USA, June.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "text": "An input-gated network that explicitly controls the compositional and non-compositional sentiment input.", |
| "type_str": "figure", |
| "num": null |
| }, |
| "FIGREF1": { |
| "uris": null, |
| "text": "have zerovalued blocks. More specifically, the top-right d-byl block of the piece matrix V", |
| "type_str": "figure", |
| "num": null |
| }, |
| "FIGREF3": { |
| "uris": null, |
| "text": "4 java -mx8g edu.stanford.nlp.sentiment.SentimentTraining -numHid 25 -trainPath train.txt -devPath dev.txt -train -model model.ser.gz", |
| "type_str": "figure", |
| "num": null |
| }, |
| "FIGREF4": { |
| "uris": null, |
| "text": "Errors made at different depths in the sentiment tree bank.", |
| "type_str": "figure", |
| "num": null |
| }, |
| "TABREF0": { |
| "type_str": "table", |
| "content": "<table><tr><td>Models</td><td>sentence-level (roots)</td><td>all phrases (all nodes)</td></tr><tr><td>(1) RNTN</td><td>42.44</td><td>79.95</td></tr><tr><td>(2) Regular-bilinear (auto)</td><td>42.37</td><td>79.97</td></tr><tr><td>(3) Regular-bilinear (manu)</td><td>42.98</td><td>80.14</td></tr><tr><td>(4) Explicitly-gated (auto)</td><td>42.58</td><td>80.06</td></tr><tr><td>(5) Explicitly-gated (manu)</td><td>43.21</td><td>80.21</td></tr><tr><td>(6) Confined-tensor (auto)</td><td>42.99</td><td>80.49</td></tr><tr><td>(7) Confined-tensor (manu)</td><td>43.75 \u2020</td><td>80.66 \u2020</td></tr><tr><td colspan=\"3\">Table 1: Model performances (accuracies) on predicting 5-category sentiment at the sentence (root) level and phrase-</td></tr><tr><td>level on Stanford Sentiment Treebank.</td><td/><td/></tr><tr><td/><td/><td>are all based on the ver-</td></tr><tr><td/><td colspan=\"2\">sion 3.3.0 of the Stanford CoreNLP 2 and our imple-</td></tr><tr><td/><td colspan=\"2\">mentation of PESN on it. The CoreNLP includes</td></tr><tr><td/><td colspan=\"2\">a java implementation of RNTN. 3 To make the re-</td></tr><tr><td/><td colspan=\"2\">sults reported in the table comparable, we trained the</td></tr></table>", |
| "html": null, |
| "num": null, |
| "text": "The numbers in the bold font are the best performances achieved on the two tasks. Both results are statistically significantly better (p < 0.05) than the corresponding RNTN results." |
| } |
| } |
| } |
| } |