| { |
| "paper_id": "S16-1027", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:24:50.195841Z" |
| }, |
| "title": "UNIMELB at SemEval-2016 Tasks 4A and 4B: An Ensemble of Neural Networks and a Word2Vec Based Model for Sentiment Classification", |
| "authors": [ |
| { |
| "first": "Xingyi", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Huizhi", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "oklianghuizi@gmail.com" |
| }, |
| { |
| "first": "Timothy", |
| "middle": [], |
| "last": "Baldwin", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "This paper describes our sentiment classification system for microblog-sized documents, and documents where a topic is present. The system consists of a softvoting ensemble of a word2vec language model adapted to classification, a convolutional neural network (CNN), and a longshort term memory network (LSTM). Our main contribution consists of a way to introduce topic information into this model, by concatenating a topic embedding, consisting of the averaged word embedding for that topic, to each word embedding vector in our neural networks. When we apply our models to SemEval 2016 Task 4 subtasks A and B, we demonstrate that the ensemble performed better than any single classifier, and our method of including topic information achieves a substantial performance gain. According to results on the official test sets, our model ranked 3rd for PN in the message-only subtask A (among 34 teams) and 1st for accuracy on the topic-dependent subtask B (among 19 teams). 1 There were some issues surrounding the evaluation metrics. We only got 7th for PN and 2nd for PN officially, but when we retrained our model using PN as the subtask intended, we place first across all metrics.", |
| "pdf_parse": { |
| "paper_id": "S16-1027", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "This paper describes our sentiment classification system for microblog-sized documents, and documents where a topic is present. The system consists of a softvoting ensemble of a word2vec language model adapted to classification, a convolutional neural network (CNN), and a longshort term memory network (LSTM). Our main contribution consists of a way to introduce topic information into this model, by concatenating a topic embedding, consisting of the averaged word embedding for that topic, to each word embedding vector in our neural networks. When we apply our models to SemEval 2016 Task 4 subtasks A and B, we demonstrate that the ensemble performed better than any single classifier, and our method of including topic information achieves a substantial performance gain. According to results on the official test sets, our model ranked 3rd for PN in the message-only subtask A (among 34 teams) and 1st for accuracy on the topic-dependent subtask B (among 19 teams). 1 There were some issues surrounding the evaluation metrics. We only got 7th for PN and 2nd for PN officially, but when we retrained our model using PN as the subtask intended, we place first across all metrics.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The rapid growth of user-generated content, much of which is sentiment-laden, has fueled an interest in sentiment analysis (Pang and Lee, 2008; Liu, 2010). One popular form of sentiment analysis involves classifying a document into discrete classes, depending on whether it expresses positive or negative sentiment (or neither). The classification can also be dependent upon a particular topic. In this work, we describe the method we used for the sentiment classification of tweets, with or without a topic.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Our approach to the document classification task consists of an ensemble of 3 classifiers via soft-voting, 2 of which are neural network models. One is the convolutional neural network (CNN) architecture of Kim (2014) , and another is a Long Short Term Memory (LSTM)-based network (Hochreiter and Schmidhuber, 1997). Both were first tuned on a distant-labelled data set. The third classifier adapted word2vec to output classification probabilities using Bayes' formula, a slightly modified version of Taddy (2015) . Despite the word2vec classifier being intended as a baseline, and having a small weight in the ensemble, it proved crucial for the ensemble to work well. To adapt our models to the case where a topic is present, in the neural network models, we concatenated the embedding vectors for each word with a topic embedding, which consisted of the element-wise average of all word vectors in a particular topic.", |
| "cite_spans": [ |
| { |
| "start": 207, |
| "end": 217, |
| "text": "Kim (2014)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 501, |
| "end": 513, |
| "text": "Taddy (2015)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We applied our approach to SemEval 2016 Task 4, including the message-only subtask (Task A) and the topic-dependent subtask (Task B) (Nakov et al., to appear). Our model ranked third for PN in the message-only subtask A (among 34 teams) and first for accuracy 1 on the topic-dependent subtask B (among 19 teams).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The source code for our approach is available at https://github.com/stevenxxiu/senti.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We now describe the classifiers we used in detail, our ensemble method, and our motivations behind choosing these classifiers.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Models", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We used the dynamic architecture of Kim (2014) for our convolutional neural network. This consists of a single 1-d convolution layer with a nonlinearity, a max-pooling layer, a dropout layer, and a softmax classification layer.", |
| "cite_spans": [ |
| { |
| "start": 36, |
| "end": 46, |
| "text": "Kim (2014)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Convolutional neural network", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "This model was chosen since it was a good performer empirically. However, due to max-pooling, this model is essentially a bag-of-phrases model, which ignores important ordering information if the tweet contains a long argument, or 2 sentences. We now give a review of the layers used.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Convolutional neural network", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "The input to the model is a document, treated as a sequence of words. Each word can possibly be represented by a vector of occurrences, a vector of counts, or a vector of features. A vector of learnt, instead of hand-crafted features, is also called a word embedding. This tends to have dimensionality \u226a | |, the vocabulary size. Hence the vectors are dense, which allows us to learn more functions of features with limited data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word embedding layer", |
| "sec_num": "2.1.1" |
| }, |
| { |
| "text": "Given an embedding of dimension , emb \u2208 R \u00d7| | , we map each document to a matrix emb, \u2208 R \u00d7| | , with each word corresponding to a row vector in the order they appear in. emb can be trained.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word embedding layer", |
| "sec_num": "2.1.1" |
| }, |
| { |
| "text": "A 1-d convolution layer aims to extract patterns useful for classification, by sliding a fixedlength filter along the input. The convolution operation for an input matrix \u2208 R \u00d7| | and a single filter \u2208 R \u00d7 of width creates a feature conv \u2208 R | |+ \u22121 by:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "1-d convolution layer", |
| "sec_num": "2.1.2" |
| }, |
| { |
| "text": "conv, = \u2211\ufe01 , ( [:, : + \u22121] \u2299 ) , + conv", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "1-d convolution layer", |
| "sec_num": "2.1.2" |
| }, |
| { |
| "text": "where \u2299 is element-wise multiplication, and conv is a bias. There are typically > 1 filters, which by stacking the feature vectors, results in conv \u2208 R \u00d7(| |+ \u22121) . Each filter has its own separate bias.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "1-d convolution layer", |
| "sec_num": "2.1.2" |
| }, |
| { |
| "text": "We used a common modification to the filter sliding, by padding the document embedding matrix with \u2212 1 zeroes on its top and bottom. This is done so that every word in the document is covered by filters.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "1-d convolution layer", |
| "sec_num": "2.1.2" |
| }, |
| { |
| "text": "There may be very few phrases targeted by a feature map in the document. For this reason, we only need to know if the desired feature is present in the document, which can be obtained by taking the maximum. Formally, we obtain a vector \u2208 R , such that:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Max pooling layer", |
| "sec_num": "2.1.3" |
| }, |
| { |
| "text": "pool, = max conv, ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Max pooling layer", |
| "sec_num": "2.1.3" |
| }, |
| { |
| "text": "To convert our features into classification probabilities, we first use a dense layer, defined by:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Softmax layer", |
| "sec_num": "2.1.4" |
| }, |
| { |
| "text": "dense = dense \u2022 pool + dense", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Softmax layer", |
| "sec_num": "2.1.4" |
| }, |
| { |
| "text": "with a softmax activation function: softmax( ) = \u2211\ufe00 such that the output dimension is the same as the number of classes. Note that output values are non-negative and sum to 1, which form a discrete probability distribution.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Softmax layer", |
| "sec_num": "2.1.4" |
| }, |
| { |
| "text": "To regularize our CNN model, dropout (Srivastava et al., 2014) is used after the max pooling layer. Intuitively, dropout assumes that we can still obtain a reasonable classification even when some of the features are dropped. To do this, each dimension is randomly set to 0 using a Bernoulli distribution ( ). In order for the training and testing to be of the same order, the test outputs can be scaled by .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Regularization", |
| "sec_num": "2.1.5" |
| }, |
| { |
| "text": "The softmax layer for the CNN model also uses a form of empirical Bayes regularization, where each row of soft is restricted using an \u2113 2 norm, by re-normalizing the vector if the norm threshold is exceeded.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Regularization", |
| "sec_num": "2.1.5" |
| }, |
| { |
| "text": "We used an LSTM for our recurrent architecture, which consisted of an embedding layer, LSTM layer, and a softmax classification layer.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Long short term memory network", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "A recurrent neural network is a neural network designed for sequential problems. Even simple RNNs are Turing complete, and they can theoretically obtain information from the entire sequence instead of only an unordered bag of phrases. But finding good architectures which can capture this and training them can be difficult. Indeed, there were many instances where our LSTM failed to capture important ordering information. We now give a brief review of the LSTM.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Long short term memory network", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Given an input sequence = [ 1 , . . . , ], a recurrent network defines an internal state function and an output function to iterate over , so that at time step :", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Long short term memory network", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "= ( , \u22121 , \u22121 ) = ( , \u22121 , )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Long short term memory network", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "where 0 and 0 are initial bias states. The simplest RNN, where:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Long short term memory network", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "( , \u22121 , ) = tanh( \u2022 [\ufe02 \u22121 ]\ufe02 )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Long short term memory network", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "suffers from the gradient vanishing and exploding problem (Hochreiter and Schmidhuber, 1997). In particular, products of saturated tanh activations can vanish the gradient, and products of can vanish or explode the gradient. The LSTM is a way to remedy this (Hochreiter and Schmidhuber, 1997). It sets:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Long short term memory network", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "= ( + \u210e \u210e \u22121 + \u2299 \u22121 + ) = ( + \u210e \u210e \u22121 + \u2299 \u22121 + ) = \u2299 \u22121 + \u2299 ( + \u210e \u210e \u22121 + ) = ( + \u210e \u210e \u22121 + \u2299 + ) \u2299 \u210e ( )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Long short term memory network", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Here,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Long short term memory network", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "\u2022 is made up of weights; , , , are the input gates, forget gates, cell states and output states. Cell states have an identity activation function. The gradient will not vanish if input needs to be carried to , since this can only happen when the forget gates are near 1. However, gradient explosion can still be present. We use the common approach of cutting off gradients above a threshold for the gradients inside , , , .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Long short term memory network", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "To use the LSTM, we first used a document embedding matrix in the same manner as our convolutional neural network architecture. This was fed into an LSTM layer. The output for the final timestep of the LSTM layer was then fed into a final softmax layer with the appropriate output size for classification.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Long short term memory network", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "We also experimented with a similar and commonly used network, the Gated Recurrent Network (GRU), but it was not used due to lower results compared to the LSTM.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Long short term memory network", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Our word2vec Bayes model is our baseline model, and described in Taddy (2015) , with the inclusion of a class prior. Taddy (2015) uses Bayes formula to compute the probabilities of a document belonging to a sentiment class. Given a document , its words { } , label , Bayes formula is:", |
| "cite_spans": [ |
| { |
| "start": 65, |
| "end": 77, |
| "text": "Taddy (2015)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "word2vec Bayes", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "( | ) = ( | ) ( ) ( )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "word2vec Bayes", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "For classification problems, we can ignore ( ) since is fixed. ( | ) is estimated by first training word2vec on a subset of the corpus with label , then using the skipgram objective composite likelihood as an approximation:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "word2vec Bayes", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "log ( | ) \u2248 \u2211\ufe01 \u2208 | | \u2211\ufe01 =1 | | \u2211\ufe01 =1 1 1\u2264| \u2212 |\u2264 log ( | , )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "word2vec Bayes", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "We estimated ( ) via class frequencies, i.e. the MLE for the categorical distribution, compared to Taddy (2015), who used the discrete uniform prior. This model was chosen since it provides a reasonable baseline, and also appears to be independent enough from our neural networks to provide a performance gain in the ensemble. The word2vec based model benefits from being a semi-supervised method, but it also loses ordering information outside a word's context window, and the prediction of neighboring words also ignores distance to that word. The limited amount of data for each class is also an issue.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "word2vec Bayes", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "If the errors made by each classifier are independent enough, then combining them in an ensemble can reduce the overall error rate. We used soft voting as a method to combine the outputs of the above classifiers. We define soft voting as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ensemble", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "vote = \u2211\ufe01 , s.t. \u2211\ufe01 = 1, \u2200 : \u2265 0", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ensemble", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "where is the output of classifier .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ensemble", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "To adapt our neural networks to topicdependent sentiment classification, in our neural network models, we augmented each embedding vector by concatenating it with a topic embedding. The motivation behind this approach is to allow the model to interpret each word to be within the context of some topic. Our topic embeddings were obtained by the element-wise average of word embeddings for each word in that topic. We found that empirically, this is a simple yet effective way of achieving a document embedding. When used directly as a feature vector in logistic regression for sentiment analysis, we have found this to outperform methods described in Le and Mikolov (2014) .", |
| "cite_spans": [ |
| { |
| "start": 651, |
| "end": 672, |
| "text": "Le and Mikolov (2014)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic dependent models", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Word embeddings of dimension = 300 pretrained over Google News were used directly, without any further tuning. Words in the tweet which were not present in this pretrained embedding were ignored.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic dependent models", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We evaluated our models on SemEval 2016 Task 4 subtask A, the message-only subtask, and subtask B, the topic-dependent subtask.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments and evaluation", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Task A consisted of 3 sentiment classes -positive, neutral and negative -whilst Task B consisted of 2 sentiment classes -positive and negative. We only managed to download 90% of the entire set of tweets for the 2016 SemEval data, due to tweets becoming \"not available\". In addition to the Twitter data for 2016, for Task A we also used training data from SemEval 2016 Task 10. The data is summarized in Table 1 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 404, |
| "end": 411, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "To pretrain our network using distant learning (described below), we took a random sample of 10M English tweets from a 5.3TB Twitter dataset crawled from 18 June to 4 Dec, 2014 using the Twitter Trending API. We then processed tweets with a regular expression: tweets which contained emoticons like :) were considered positive, while those which contained emoticons like :( were considered negative; tweets which contained both positive and negative emoticons, or other emoticons such as :-, were ignored. We extracted 1M tweets each for the positive and negative classes.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Evaluation consisted of accuracy, macro-averaged F1 across the positive and negative classes, which we denote PN , and macro-averaged recall across the positive and negative classes, which we denote PN .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "All methods use the same preprocessing. We normalized the tweets by first replacing URLs with url and author methods such as @Ladiibubblezz with author . Casing was preserved, as the pretrained word2vec vectors included casing.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Preprocessing", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "The tweets were tokenized using twokenize, with it's being split into it and 's.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Preprocessing", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "For both our models, we initialized emb to word embeddings pretrained using word2vec's skip-gram model (Mikolov et al., 2013) on the Google News corpus, where = 300. Unknown words were drawn from [\u22120.25, 0.25] to match the variance of the pretrained vectors. For the CNN model, we also stripped words so all documents had a length \u2264 56. For our CNN, we used used 3 1-d convolutions, with filter sizes of 3, 4, 5, each with 100 filters. Our dropout rate was 0.5, and our \u2113 2 norm restriction was 3. For our LSTM, we used a cell dimension size of 300, and our activations were chosen empirically to be = = = sigmoid, = tanh, our gradient cutoff was 100.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Neural networks", |
| "sec_num": "4.4.1" |
| }, |
| { |
| "text": "To train our neural networks, we used cross entropy loss and minibatch gradient descent with a batch size of 64. For our CNN, we used the adadelta (Zeiler, 2012) gradient descent method with the default settings. For our LSTM, we used the rmsprop gradient descent method with a learning rate of 0.01.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Neural networks", |
| "sec_num": "4.4.1" |
| }, |
| { |
| "text": "Due to limited training data, we can use distant learning (Severyn and Moschitti, 2015), by initializing the weights of our neural networks by first training them on a silver standard data set (generated using Twitter emoticons which we describe below), then tuning them further on the gold standard data set (Severyn and Moschitti, 2015). However, we did not use this for our topic-dependent models, as there was no performance gain.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Neural networks", |
| "sec_num": "4.4.1" |
| }, |
| { |
| "text": "We split the distant data into 10 4 tweets per epoch, and took the best epoch on the validation set as the initial weights, using PN as our scoring metric. We repeatedly iterated over the SemEval data with 10 3 tweets per epoch for 10 2 epochs, and again took the best epoch on the validation set as the final weights.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Neural networks", |
| "sec_num": "4.4.1" |
| }, |
| { |
| "text": "Gensim (\u0158eh\u016f\u0159ek and Sojka, 2010) was used to train the word embeddings and obtain ( | ). We used the skipgram objective, with an embedding dimension of 100, window size of 10, hierarchical softmax with 5 samples, 20 training iterations, no frequent word cutoff, and a sam- ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "word2vec Bayes", |
| "sec_num": "4.4.2" |
| }, |
| { |
| "text": "To find , we first relaxed the sum condition of by setting 1 = 1 and noting that max , , is invariant under scaling. We then used the L-BFGS-B algorithm, with initial weights of 1, combined with basin-hopping for 1000 iterations. We optimized for accuracy, since this most-consistently improved results.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Soft voting", |
| "sec_num": "4.4.3" |
| }, |
| { |
| "text": "The official evaluation results are shown in Table 2 and Table 3 . The results for Task A suggest that our models are overfitting. Our best position was achieved on the Twitter 2016 dataset, and indeed, this is what our parameters were chosen on.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 45, |
| "end": 64, |
| "text": "Table 2 and Table 3", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Our own evaluation of our different classifiers, using Twitter 2016-test, is shown in The dashed line separates topic models from message-only models. The lstm topic model has poorer accuracy due to being optimized on PN . Table 5 . Taking into account all evaluation metrics, we can see that in both tasks, our CNN outperforms our LSTM, in Task A slightly and in Task B substantially. The word2vec Bayes model is worse than both, moreso in Task B. Soft voting outperforms all classifiers, showing that there is some independence amongst the errors made. In Task A, there appears to be more correlation between the CNN and LSTM classifiers, as excluding the word2vec Bayes model reduces the performance. In Task B, the word2vec Bayes model appears to perform too poorly to provide a marked benefit.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 223, |
| "end": 230, |
| "text": "Table 5", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5" |
| }, |
| { |
| "text": "From Table 5 we can see that the inclusion of topic information provides a substantial boost to both of our neural networks. This shows that our method of incorporating topic information is a useful way of modifying neural networks, and provides a strong baseline for alternative ways of doing this.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 5, |
| "end": 12, |
| "text": "Table 5", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We described our ensemble approach to sentiment analysis for both the task of topic-dependent document classification and document classification by itself. We gave a detailed description of how to modify our classifiers to be topic-dependent. The results show that ensembles can work for neural nets, and that our way of including topics achieves performance gains, and forms a good basis for future research in this area.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "6" |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Long short-term memory", |
| "authors": [], |
| "year": 1997, |
| "venue": "Sepp Hochreiter and J\u00fcrgen Schmidhuber", |
| "volume": "9", |
| "issue": "", |
| "pages": "1735--1780", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "[Hochreiter and Schmidhuber1997] Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural computation, 9(8):1735-1780.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Convolutional neural networks for sentence classification", |
| "authors": [ |
| { |
| "first": "Yoon", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1746--1751", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yoon Kim. 2014. Convolutional neural networks for sentence classification. In Proceed- ings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1746-1751, Doha, Qatar.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Distributed representations of sentences and documents", |
| "authors": [ |
| { |
| "first": "]", |
| "middle": [], |
| "last": "Mikolov2014", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Quoc", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1405.4053" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "and Mikolov2014] Quoc V. Le and Tomas Mikolov. 2014. Distributed representations of sentences and documents. In arXiv preprint arXiv:1405.4053.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Sentiment analysis and subjectivity", |
| "authors": [ |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bing Liu. 2010. Sentiment analysis and subjectivity. In Nitin Indurkhya and Fred J.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Opinion mining and sentiment analysis. Foundations and trends in information retrieval", |
| "authors": [ |
| { |
| "first": ";", |
| "middle": [], |
| "last": "Damerau", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "\u0158eh\u016f\u0159ek and Sojka2010] Radim\u0158eh\u016f\u0159ek and Petr Sojka. 2010. Software framework for topic modelling with large corpora", |
| "volume": "4", |
| "issue": "", |
| "pages": "1929--1958", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Damerau, editors, Handbook of Natural Language Processing. Chapman & Hall/CRC, Boca Raton, USA, 2nd edition. [Mikolov et al.2013] Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg S. Corrado, and Jeff Dean. 2013. Distributed representations of words and phrases and their compositionality. In Advances in neural information processing systems, pages 3111-3119. [Nakov et al.to appear] Preslav Nakov, Alan Ritter, Sara Rosenthal, Veselin Stoyanov, and Fabrizio Sebastiani. to appear. SemEval-2016 Task 4: Sen- timent analysis in Twitter. In Proceedings of the 10th International Workshop on Semantic Evalu- ation (SemEval 2016), San Diego, USA. [Pang and Lee2008] Bo Pang and Lillian Lee. 2008. Opinion mining and sentiment analysis. Founda- tions and trends in information retrieval, 2(1-2):1- 135. [\u0158eh\u016f\u0159ek and Sojka2010] Radim\u0158eh\u016f\u0159ek and Petr Sojka. 2010. Software framework for topic modelling with large corpora. In Proceed- ings of the LREC 2010 Workshop on New Challenges for NLP Frameworks, pages 45-50, Valletta, Malta. ELRA. http://is.muni.cz/ publication/884893/en. [Severyn and Moschitti2015] Aliaksei Severyn and Alessandro Moschitti. 2015. UNITN: Training deep convolutional neural network for Twitter sen- timent classification. In Proceedings of the 9th International Workshop on Semantic Evaluation (SemEval), pages 464-469, Denver, USA. [Srivastava et al.2014] Nitish Srivastava, Geoffrey Hinton, Alex Krizhevsky, Ilya Sutskever, and Rus- lan Salakhutdinov. 2014. Dropout: A sim- ple way to prevent neural networks from overfit- ting. The Journal of Machine Learning Research, 15(1):1929-1958.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Document classification by inversion of distributed language representations", |
| "authors": [ |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Taddy", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1504.07295" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matt Taddy. 2015. Document classifi- cation by inversion of distributed language repre- sentations. In arXiv:1504.07295 [cs, stat].", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "ADADELTA: An adaptive learning rate method", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Matthew", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Zeiler", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1212.5701" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew D. Zeiler. 2012. ADADELTA: An adaptive learning rate method. arXiv preprint arXiv:1212.5701.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF0": { |
| "text": "Semeval-2016 data. The negative : neutral : positive split was 16 : 42 : 42 for all of 2016 Task A used. The negative : positive split was 19 : 81 for all of 2016 Task B used.", |
| "html": null, |
| "num": null, |
| "type_str": "table", |
| "content": "<table><tr><td>Datset</td><td colspan=\"4\">A total A used B total B used</td></tr><tr><td colspan=\"2\">Twitter 2016-train 6000</td><td>5465</td><td>4346</td><td>3941</td></tr><tr><td/><td/><td>(+11340)</td><td/><td/></tr><tr><td>Twitter 2016-dev</td><td>2000</td><td>1829</td><td>1325</td><td>1210</td></tr><tr><td>Twitter 2016-test</td><td>2000</td><td>1807</td><td>1417</td><td>1270</td></tr></table>" |
| }, |
| "TABREF2": { |
| "text": "", |
| "html": null, |
| "num": null, |
| "type_str": "table", |
| "content": "<table><tr><td colspan=\"5\">: Official test scores and ranks for Task</td></tr><tr><td>A.</td><td/><td/><td/></tr><tr><td>Dataset</td><td colspan=\"2\">Val metric PN</td><td>PN</td><td>Acc</td></tr><tr><td>Twitter 2016 Twitter 2016</td><td>PN PN</td><td colspan=\"3\">0.758 7 0.788 2 0.870 1 0.807 1 0.806 1 0.867 1</td></tr></table>" |
| }, |
| "TABREF3": { |
| "text": "Test scores and ranks for Task B. The official run incorrectly used PN as the validation metric.", |
| "html": null, |
| "num": null, |
| "type_str": "table", |
| "content": "<table><tr><td>ple coefficient of 0.</td></tr></table>" |
| }, |
| "TABREF4": { |
| "text": "", |
| "html": null, |
| "num": null, |
| "type_str": "table", |
| "content": "<table><tr><td>and</td></tr></table>" |
| }, |
| "TABREF5": { |
| "text": "Results for Task B, sorted by PN .", |
| "html": null, |
| "num": null, |
| "type_str": "table", |
| "content": "<table/>" |
| } |
| } |
| } |
| } |