| { |
| "paper_id": "S18-1021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:43:24.688891Z" |
| }, |
| "title": "Epita at SemEval-2018 Task 1: Sentiment Analysis Using Transfer Learning Approach", |
| "authors": [ |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Daval-Frerot", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Abdessalam", |
| "middle": [], |
| "last": "Bouchekif", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Anatole", |
| "middle": [], |
| "last": "Moreau", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "In this paper we present our system for detecting valence task. The major issue was to apply a state-of-the-art system despite the small dataset provided : the system would quickly overfit. The main idea of our proposal is to use transfer learning, which allows to avoid learning from scratch. Indeed, we start to train a first model to predict if a tweet is positive, negative or neutral. For this we use an external dataset which is larger and similar to the target dataset. Then, the pre-trained model is re-used as the starting point to train a new model that classifies a tweet into one of the seven various levels of sentiment intensity. Our system, trained using transfer learning, achieves 0.776 and 0.763 respectively for Pearson correlation coefficient and weighted quadratic kappa metrics on the subtask evaluation dataset.", |
| "pdf_parse": { |
| "paper_id": "S18-1021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "In this paper we present our system for detecting valence task. The major issue was to apply a state-of-the-art system despite the small dataset provided : the system would quickly overfit. The main idea of our proposal is to use transfer learning, which allows to avoid learning from scratch. Indeed, we start to train a first model to predict if a tweet is positive, negative or neutral. For this we use an external dataset which is larger and similar to the target dataset. Then, the pre-trained model is re-used as the starting point to train a new model that classifies a tweet into one of the seven various levels of sentiment intensity. Our system, trained using transfer learning, achieves 0.776 and 0.763 respectively for Pearson correlation coefficient and weighted quadratic kappa metrics on the subtask evaluation dataset.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The goal of detecting valence task is to classify a given tweet into one of seven classes, corresponding to various levels of positive and negative sentiment intensity, that best represents the mental state of the tweeter. This can be seen as a multiclass classification problem, in which each tweet must be classified in one of the following classes : very negative (-3), moderately negative (-2), slightly negative (-1), neutral/mixed (0), slightly positive (1), moderately positive (2) and very positive (3) .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Several companies have been interested in customer opinion for a given product or service. Sentiment analysis is one approach to automatically detect their emotions from comments posted in social networks.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "With the recent advances in deep learning, the ability to analyse sentiments has considerably improved. Indeed, many experiments have used state-of-the-art systems to achieve high performance. For example, (Baziotis et al., 2017) use Bidirectional Long Short-Term Memory (B-LSTM) with attention mechanisms while (Deriu et al., 2016) use Convolutional Neural Networks (CNN). Both systems obtained the best performance at the 2016 and 2017 SemEval 4-A task respectively.", |
| "cite_spans": [ |
| { |
| "start": 206, |
| "end": 229, |
| "text": "(Baziotis et al., 2017)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 312, |
| "end": 332, |
| "text": "(Deriu et al., 2016)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The amount of data is argued to be the main condition to train a reliable deep neural network. However, the dataset provided to build our system is limited. To address this issue, two solutions can be considered. The first solution consists in extending our dataset by either manually labeling new data, which can be very time consuming, or by using over-sampling approaches. The second solution consists in applying a transfer learning, which allows to avoid learning the model from scratch.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we apply a transfer learning approach, from a model trained on a similar task : we propose to pre-train a model to predict if a tweet is positive, negative or neutral. Precisely, we apply a B-LSTM on an external dataset. Then, the pre-trained model is re-used to classify a tweet according to the seven-point scale of positive and negative sentiment intensity.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The rest of the paper is organized as follows. Section 2 presents a brief definition of transfer learning. The description of our proposed system is presented in Section 3. The experimental setup and results are described in Section 4. Finally, a conclusion is given with a discussion of future works in Section 5.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Transfer Learning (TL) consists in transferring the knowledge learned on one task to a second related task. In other words, the TL is about training a base network and then copy its first n layers to the first n layers of a target network (Yosinski et al., 2014) . Usually the first n layers of a pre-trained model (or source model) are frozen when training the new model. This means that weights are not changed during training on the new task. TL should not be confused with fine-tuning where the back-propagation error affects the entire neural network (including the first n layers).", |
| "cite_spans": [ |
| { |
| "start": 239, |
| "end": 262, |
| "text": "(Yosinski et al., 2014)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transfer Learning", |
| "sec_num": "2" |
| }, |
| { |
| "text": "For a limited number of training examples, TL allows to provide more precise predictions than the traditional supervised learning approaches. Moreover, TL significantly speeds up the learning process as training does not start from scratch. For example, (Cirean et al., 2012 ) use a CNN trained to recognize the Latin handwritten characters for the detection of Chinese characters. In natural language processing, TL has improved the performance of several systems from various domains such as : sentiment classification (Glorot et al., 2011) , automatic translation (Zoph et al., 2016) , speech recognition and document classification (Wang and Zheng, 2015) .", |
| "cite_spans": [ |
| { |
| "start": 254, |
| "end": 274, |
| "text": "(Cirean et al., 2012", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 521, |
| "end": 542, |
| "text": "(Glorot et al., 2011)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 567, |
| "end": 586, |
| "text": "(Zoph et al., 2016)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 636, |
| "end": 658, |
| "text": "(Wang and Zheng, 2015)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transfer Learning", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In this section, we present the four main steps of our approach : (1) Text processing to filter the noise from the raw text data, (2) Feature extraction to represent words in tweets as vectors of length 426 by concatenating several features, (3) Pre-training model to predict the tweet polarity (positive, negative or neutral) based on external data and (4) Learning a new model where the pretrained model is adapted to our task by removing the last layer and adding a fully-connected layer followed by an output layer.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Proposed System", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Tweets are processed using ekphrasis 1 tool which allows to perform the following tasks : tokenization, word normalization, word segmentation (for splitting hashtags) and spell correction (i.e replace a misspelled word with the most probable candidate word). All words are lowercase. E-mails, URLs and user handles are normalized. A detailed description of this tool is given in (Baziotis et al., 2017).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Text processing", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Each word in each tweet is represented by a vector of 426 dimensions which are obtained by the concatenation of the following features :", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature extraction", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "1. https://github.com/cbaziotis/ ekphrasis -AFINN and Emoji Valence 2 are two lists of english words and emojis rated for valence scoring range from \u22125 (very negative) to +5 (very positive) (Nielsen, 2011 ). -Depeche Mood is a lexicon of 37k words associated with emotion scores (afraid, amused, angry, annoyed, sad, happy, inspired and don't care) (Staiano and Guerini, 2014 (Mohammad and Turney, 2013) , (Mohammad and Kiritchenko, 2015) and (Mohammad, 2017) . The intensity score for both emotions and sentiments takes a value between 0 and 1. -Opinion Lexicon English contains around 7k positive and negative sentiment words for the english language (Hu and Liu, 2004 ). -Sentiment140 is a list of words and their associations with positive and negative sentiment ). -Words embeddings are dense vectors of real numbers capturing the semantic meanings of words. We use datastories embeddings (Baziotis et al., 2017) which were trained on 330M english twitter messages posted from 12/2012 to 07/2016. The embeddings used in this work are 300 dimensional. ", |
| "cite_spans": [ |
| { |
| "start": 190, |
| "end": 204, |
| "text": "(Nielsen, 2011", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 349, |
| "end": 375, |
| "text": "(Staiano and Guerini, 2014", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 376, |
| "end": 403, |
| "text": "(Mohammad and Turney, 2013)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 406, |
| "end": 438, |
| "text": "(Mohammad and Kiritchenko, 2015)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 443, |
| "end": 459, |
| "text": "(Mohammad, 2017)", |
| "ref_id": null |
| }, |
| { |
| "start": 653, |
| "end": 670, |
| "text": "(Hu and Liu, 2004", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 894, |
| "end": 917, |
| "text": "(Baziotis et al., 2017)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature extraction", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The objective is to build a model which allows to predict the tweeter's attitude (positive, negative or neutral). Bidirectional Long Short-Term Memory networks (B-LSTM) (Schuster and Paliwal, 1997) have become a standard for sentiment analysis (Baziotis et al., 2017) (Mousa and Schuller, 2017) (Moore and Rayson, 2017) . B-LSTM consists in two LSTMs in different directions running in parallel : the first forward network reads the input sequence from left to right and the second backward network reads the sequence from right to left. Each LSTM yields a hidden representation : h (left to right vector) and \u2190 \u2212 h (right-to-left vector) which are then combined to compute the output sequence. For our problem, capturing the context of words from both directions allows to better understand the tweet semantic. We here use a B-LSTM network with 2 layers of 150 neurons each. The architecture is shown in Figure 1 (a) .", |
| "cite_spans": [ |
| { |
| "start": 169, |
| "end": 197, |
| "text": "(Schuster and Paliwal, 1997)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 244, |
| "end": 267, |
| "text": "(Baziotis et al., 2017)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 268, |
| "end": 294, |
| "text": "(Mousa and Schuller, 2017)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 295, |
| "end": 319, |
| "text": "(Moore and Rayson, 2017)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 905, |
| "end": 917, |
| "text": "Figure 1 (a)", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Pre-training model", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "For training, we use the external dataset 3 composed of 50333 tweets (7840 negatives, 19903 positives and 22590 neutrals).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pre-training model", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "3. https://github.com/cbaziotis/ datastories-semeval2017-task4/tree/ master/dataset/Subtask_A/downloaded.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pre-training model", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Let us note that our final objective is to train a model to classify a tweet into seven classes (very negative, moderately negative, slightly negative, neutral, slightly positive, moderately positive and very positive). To train the model, we use the dataset provided for the target task . The training and development dataset contain respectively 1180 and 448 tweets. Since the dataset is small, fine-tuning may result in overfitting. Therefore, we propose to freeze the network layers except the final dense layer that is associated with the three classes sentiment analysis, which is removed after pre-training. Then, we add a fully-connected layer of 150 neurons followed by an output layer of 7 neurons, as illustrated on Figure 1 (b) .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 727, |
| "end": 739, |
| "text": "Figure 1 (b)", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Learning model", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "The official 4 evaluation metric is Pearson Correlation Coefficient (P ). Submitted systems are also evaluated with the weighted quadratic kappa (W ). However, the pre-trained model was evaluated using classification accuracy. We implemented our system using Keras tool with the Tensorflow backend.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results and Analysis", |
| "sec_num": "4" |
| }, |
| { |
| "text": "As proposed in (Baziotis et al., 2017) , we used B-LSTM with the following parameters : size of LSTM layers is 150 (300 for B-LSTM), 2 layers of B-LSTM, with a dropout of 0.3 and 0.5 for embedding and LSTM layers respectively. Other hyperparameters used are : Gaussian noise with \u03c3 of 0.3, and L 2 regularization of 0.0001. We trained the B-LSTM over 18 epochs with a learning rate of 0.01 and batch size of 128 sequences.", |
| "cite_spans": [ |
| { |
| "start": 15, |
| "end": 38, |
| "text": "(Baziotis et al., 2017)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pre-trained model evaluation", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We trained our model with external data (more details in section 3.3) but for the evaluation we adapted the training and development sets provided for the target task. The various levels of positive sentiments (i.e slightly, moderately and very positive) were regrouped in the same class. The same goes for the various levels of negative sentiments. Our model achieves 69.4% of accuracy.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pre-trained model evaluation", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We adapted the pre-trained model described above by removing the last fully-connected layer, and added a dense layer of 150 neurons followed by an output layer of 7 neurons. As a reminder, the pre-trained layers are frozen. We used the training and development sets to train our system, and evaluated by predicting the valence on the evaluation set. We trained our model over 8 epochs with a learning rate of 0.01 and batch size of 50 sequences. Our model achieves 0.776 and 0.763 respectively on P and W .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model evaluation", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Finally, we conducted a set of experiments to validate our system and approach. We evaluated more commonly used systems, with and without transfer learning. These new systems are built by : -using similar number of layers, parameters and hyper-parameters. -replacing B-LSTM layers by LSTM, CNN and dense layers. -for the DNN, computing predictions using the mean of each word-vector of tweets, since it can not use sequences as input. -for the CNN, using multiple convolutional filters of sizes 3, 4 and 5. -for the combinations of systems, averaging the output probabilities. The results are presented on Table 1 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 606, |
| "end": 613, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Other experiments", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "We can observe that TL approach achieves better scores, and that B-LSTM is leading the score on both approaches as a single system. Moreover, combining systems enhances greatly the prediction without TL, but decreases the score with TL : the combination of independent systems compensates a small lack of data, but becomes useless with enough training.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Other experiments", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "In this paper, we propose to use a transfer learning approach for sentiment analysis (SemE-val2018 task 1). Using B-LSTM networks, we pretrained a model to predict the tweet polarity (positive, negative or neutral) based on an external dataset of 50k tweets. To avoid the overfitting, layers (except the last one) of the pre-trained model were frozen. A dense layer was then added followed by a seven neurones output layer. Finally, the new network was trained on the small target dataset. The system achieves a score of 0.776 on Pearson Correlation Coefficient.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Improvements could be made concerning the features, and by using attention mechanisms. However, the future work will focus on multiple transfers, to increase the amount of data used in the process. We will perform transfers from two classes (positive and negative) to three classes (adding neutral), then five classes and finally seven classes. Numerous datasets 5 are currently available to deploy such a system.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| }, |
| { |
| "text": ". https://github.com/words/ emoji-emotion", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": ". https://github.com/felipebravom/ SemEval_2018_Task_1_Eval", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We thank Dr. Yassine Nair Benrekia for interesting scientific discussions. 5. http://alt.qcri.org/semeval2016/ task4/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Datastories at semeval-2017 task 6 : Siamese LSTM with attention for humorous text comparison", |
| "authors": [ |
| { |
| "first": "Christos", |
| "middle": [], |
| "last": "Baziotis", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikos", |
| "middle": [], |
| "last": "Pelekis", |
| "suffix": "" |
| }, |
| { |
| "first": "Christos", |
| "middle": [], |
| "last": "Doulkeridis", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 11th International Workshop on Semantic Evaluation, SemEval@ACL 2017", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christos Baziotis, Nikos Pelekis, and Christos Doulke- ridis. 2017. Datastories at semeval-2017 task 6 : Sia- mese LSTM with attention for humorous text com- parison. In Proceedings of the 11th International Workshop on Semantic Evaluation, SemEval@ACL 2017, Vancouver, Canada.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Transfer learning for latin and chinese characters with deep neural networks", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [ |
| "C" |
| ], |
| "last": "Cirean", |
| "suffix": "" |
| }, |
| { |
| "first": "U", |
| "middle": [], |
| "last": "Meier", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Schmidhuber", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "The 2012 International Joint Conference on Neural Networks (IJCNN)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "D. C. Cirean, U. Meier, and J. Schmidhuber. 2012. Transfer learning for latin and chinese characters with deep neural networks. In The 2012 Internatio- nal Joint Conference on Neural Networks (IJCNN).", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Swisscheese at semeval-2016 task 4 : Sentiment classification using an ensemble of convolutional neural networks with distant supervision", |
| "authors": [ |
| { |
| "first": "Jan", |
| "middle": [], |
| "last": "Deriu", |
| "suffix": "" |
| }, |
| { |
| "first": "Maurice", |
| "middle": [], |
| "last": "Gonzenbach", |
| "suffix": "" |
| }, |
| { |
| "first": "Fatih", |
| "middle": [], |
| "last": "Uzdilli", |
| "suffix": "" |
| }, |
| { |
| "first": "Aur\u00e9lien", |
| "middle": [], |
| "last": "Lucchi", |
| "suffix": "" |
| }, |
| { |
| "first": "Valeria", |
| "middle": [ |
| "De" |
| ], |
| "last": "Luca", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Jaggi", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 10th International Workshop on Semantic Evaluation, SemEval@NAACL-HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jan Deriu, Maurice Gonzenbach, Fatih Uzdilli, Aur\u00e9lien Lucchi, Valeria De Luca, and Martin Jaggi. 2016. Swisscheese at semeval-2016 task 4 : Senti- ment classification using an ensemble of convolu- tional neural networks with distant supervision. In Proceedings of the 10th International Workshop on Semantic Evaluation, SemEval@NAACL-HLT, USA.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Domain adaptation for large-scale sentiment classification : A deep learning approach", |
| "authors": [ |
| { |
| "first": "Xavier", |
| "middle": [], |
| "last": "Glorot", |
| "suffix": "" |
| }, |
| { |
| "first": "Antoine", |
| "middle": [], |
| "last": "Bordes", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 28th International Conference on Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xavier Glorot, Antoine Bordes, and Yoshua Bengio. 2011. Domain adaptation for large-scale sentiment classification : A deep learning approach. In Procee- dings of the 28th International Conference on Ma- chine Learning, USA.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Mining and summarizing customer reviews", |
| "authors": [ |
| { |
| "first": "Minqing", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the Tenth ACM SIGKDD International Conference on Knowledge Discovery and Data Mining", |
| "volume": "", |
| "issue": "", |
| "pages": "168--177", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Minqing Hu and Bing Liu. 2004. Mining and summari- zing customer reviews. In Proceedings of the Tenth ACM SIGKDD International Conference on Know- ledge Discovery and Data Mining, Seattle, Washing- ton, USA, pages 168-177.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Nrc-canada : Building the state-of-theart in sentiment analysis of tweets", |
| "authors": [ |
| { |
| "first": "Saif", |
| "middle": [], |
| "last": "Mohammad", |
| "suffix": "" |
| }, |
| { |
| "first": "Svetlana", |
| "middle": [], |
| "last": "Kiritchenko", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodan", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the seventh international workshop on Semantic Evaluation Exercises", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Saif Mohammad, Svetlana Kiritchenko, and Xiaodan Zhu. 2013. Nrc-canada : Building the state-of-the- art in sentiment analysis of tweets. In Proceedings of the seventh international workshop on Semantic Evaluation Exercises (SemEval-2013).", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Crowdsourcing a word-emotion association lexicon", |
| "authors": [ |
| { |
| "first": "Saif", |
| "middle": [], |
| "last": "Mohammad", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [ |
| "D" |
| ], |
| "last": "Turney", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Computational Intelligence", |
| "volume": "29", |
| "issue": "3", |
| "pages": "436--465", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Saif Mohammad and Peter D. Turney. 2013. Crowd- sourcing a word-emotion association lexicon. Com- putational Intelligence, 29(3) :436-465.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Semeval-2018 Task 1 : Affect in tweets", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Saif", |
| "suffix": "" |
| }, |
| { |
| "first": "Felipe", |
| "middle": [], |
| "last": "Mohammad", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohammad", |
| "middle": [], |
| "last": "Bravo-Marquez", |
| "suffix": "" |
| }, |
| { |
| "first": "Svetlana", |
| "middle": [], |
| "last": "Salameh", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Kiritchenko", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of International Workshop on Semantic Evaluation (SemEval-2018)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Saif M. Mohammad, Felipe Bravo-Marquez, Moham- mad Salameh, and Svetlana Kiritchenko. 2018. Semeval-2018 Task 1 : Affect in tweets. In Procee- dings of International Workshop on Semantic Eva- luation (SemEval-2018), New Orleans, LA, USA.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Using hashtags to capture fine emotion categories from tweets", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Saif", |
| "suffix": "" |
| }, |
| { |
| "first": "Svetlana", |
| "middle": [], |
| "last": "Mohammad", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Kiritchenko", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Computational Intelligence", |
| "volume": "31", |
| "issue": "2", |
| "pages": "301--326", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Saif M. Mohammad and Svetlana Kiritchenko. 2015. Using hashtags to capture fine emotion catego- ries from tweets. Computational Intelligence, 31(2) :301-326.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Understanding emotions : A dataset of tweets to study interactions between affect categories", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Saif", |
| "suffix": "" |
| }, |
| { |
| "first": "Svetlana", |
| "middle": [], |
| "last": "Mohammad", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Kiritchenko", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 11th Edition of the Language Resources and Evaluation Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Saif M. Mohammad and Svetlana Kiritchenko. 2018. Understanding emotions : A dataset of tweets to study interactions between affect categories. In Pro- ceedings of the 11th Edition of the Language Re- sources and Evaluation Conference, Miyazaki, Ja- pan.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Lancaster A at semeval-2017 task 5 : Evaluation metrics matter : predicting sentiment from financial news headlines", |
| "authors": [ |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Moore", |
| "suffix": "" |
| }, |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Rayson", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 11th International Workshop on Semantic Evaluation, SemEval@ACL 2017", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrew Moore and Paul Rayson. 2017. Lancaster A at semeval-2017 task 5 : Evaluation metrics matter : predicting sentiment from financial news headlines. In Proceedings of the 11th International Workshop on Semantic Evaluation, SemEval@ACL 2017, Ca- nada.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Contextual bidirectional long short term memory recurrent neural network language models : A generative approach to sentiment analysis", |
| "authors": [ |
| { |
| "first": "Amr", |
| "middle": [], |
| "last": "Mousa", |
| "suffix": "" |
| }, |
| { |
| "first": "Bjrn", |
| "middle": [], |
| "last": "Schuller", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Amr Mousa and Bjrn Schuller. 2017. Contextual bi- directional long short term memory recurrent neural network language models : A generative approach to sentiment analysis. In Proceedings of the 15th Conference of the European Chapter of the Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "A new evaluation of a word list for sentiment analysis in microblogs", |
| "authors": [ |
| { |
| "first": "Finn \u00c5rup", |
| "middle": [], |
| "last": "Nielsen", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the ESWC2011 Workshop on 'Making Sense of Microposts' : Big things come in small packages Crete", |
| "volume": "", |
| "issue": "", |
| "pages": "93--98", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Finn \u00c5rup Nielsen. 2011. A new evaluation of a word list for sentiment analysis in microblogs. In Procee- dings of the ESWC2011 Workshop on 'Making Sense of Microposts' : Big things come in small packages Crete, Greece, pages 93-98.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Sentiment of emojis", |
| "authors": [ |
| { |
| "first": "Petra", |
| "middle": [ |
| "Kralj" |
| ], |
| "last": "Novak", |
| "suffix": "" |
| }, |
| { |
| "first": "Jasmina", |
| "middle": [], |
| "last": "Smailovic", |
| "suffix": "" |
| }, |
| { |
| "first": "Borut", |
| "middle": [], |
| "last": "Sluban", |
| "suffix": "" |
| }, |
| { |
| "first": "Igor", |
| "middle": [], |
| "last": "Mozetic", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "CoRR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Petra Kralj Novak, Jasmina Smailovic, Borut Sluban, and Igor Mozetic. 2015. Sentiment of emojis. CoRR, abs/1509.07761.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Bidirectional recurrent neural networks", |
| "authors": [ |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Schuster", |
| "suffix": "" |
| }, |
| { |
| "first": "Kuldip", |
| "middle": [ |
| "K" |
| ], |
| "last": "Paliwal", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "IEEE Trans. Signal Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mike Schuster and Kuldip K. Paliwal. 1997. Bidirec- tional recurrent neural networks. IEEE Trans. Si- gnal Processing.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Depeche mood : a lexicon for emotion analysis from crowd annotated news", |
| "authors": [ |
| { |
| "first": "Jacopo", |
| "middle": [], |
| "last": "Staiano", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Guerini", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics, ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "427--433", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacopo Staiano and Marco Guerini. 2014. Depeche mood : a lexicon for emotion analysis from crowd annotated news. In Proceedings of the 52nd Annual Meeting of the Association for Computational Lin- guistics, ACL, Baltimore, USA, pages 427-433.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "The psychological meaning of words : Liwc and computerized text analysis methods", |
| "authors": [ |
| { |
| "first": "Yla", |
| "middle": [ |
| "R" |
| ], |
| "last": "Tausczik", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [ |
| "W" |
| ], |
| "last": "Pennebaker", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Journal of Language and Social Psychology", |
| "volume": "29", |
| "issue": "", |
| "pages": "24--54", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yla R. Tausczik and James W. Pennebaker. 2010. The psychological meaning of words : Liwc and compu- terized text analysis methods. Journal of Language and Social Psychology, 29 :24-54.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Transfer learning for speech and language processing", |
| "authors": [ |
| { |
| "first": "Dong", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas Fang", |
| "middle": [], |
| "last": "Zheng", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Asia-Pacific Signal and Information Processing Association Annual Summit and Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dong Wang and Thomas Fang Zheng. 2015. Trans- fer learning for speech and language processing. In Asia-Pacific Signal and Information Processing As- sociation Annual Summit and Conference.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "How transferable are features in deep neural networks ?", |
| "authors": [ |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Yosinski", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeff", |
| "middle": [], |
| "last": "Clune", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "Hod", |
| "middle": [], |
| "last": "Lipson", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 27th International Conference on Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jason Yosinski, Jeff Clune, Yoshua Bengio, and Hod Lipson. 2014. How transferable are features in deep neural networks ? In Proceedings of the 27th Inter- national Conference on Neural Information Proces- sing Systems.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Transfer learning for low-resource neural machine translation", |
| "authors": [ |
| { |
| "first": "Barret", |
| "middle": [], |
| "last": "Zoph", |
| "suffix": "" |
| }, |
| { |
| "first": "Deniz", |
| "middle": [], |
| "last": "Yuret", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "May", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Knight", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Barret Zoph, Deniz Yuret, Jonathan May, and Kevin Knight. 2016. Transfer learning for low-resource neural machine translation. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, USA.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "Our transfer learning approach for sentiment analysis. (a) Pre-trained model learned with B-LSTM network with 2 layers of 150 neurons each to predict if a tweet is positive, negative or neutral. (b) The first layers of pre-traind model are locked and re-purposed to predict various levels of positive and negative sentiment intensity.", |
| "num": null, |
| "type_str": "figure", |
| "uris": null |
| } |
| } |
| } |
| } |