| { |
| "paper_id": "S18-1034", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:43:51.465715Z" |
| }, |
| "title": "Label for Affect in Tweets", |
| "authors": [ |
| { |
| "first": "Zi-Yuan", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National Sun Yat-sen University", |
| "location": { |
| "settlement": "Kaohsiung", |
| "country": "Taiwan" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Chia-Ping", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National Sun Yat-sen University", |
| "location": { |
| "settlement": "Kaohsiung", |
| "country": "Taiwan" |
| } |
| }, |
| "email": "cpchen@cse.nsysu.edu.tw" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "This paper describes our system implementation for subtask V-oc of SemEval-2018 Task 1: affect in tweets. We use multi-task learning method to learn shared representation, then learn the features for each task. There are five classification models in the proposed multitask learning approach. These classification models are trained sequentially to learn different features for different classification tasks. In addition to the data released for SemEval-2018, we use datasets from previous SemEvals during system construction. Our Pearson correlation score is 0.638 on the official SemEval-2018 Task 1 test set.", |
| "pdf_parse": { |
| "paper_id": "S18-1034", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "This paper describes our system implementation for subtask V-oc of SemEval-2018 Task 1: affect in tweets. We use multi-task learning method to learn shared representation, then learn the features for each task. There are five classification models in the proposed multitask learning approach. These classification models are trained sequentially to learn different features for different classification tasks. In addition to the data released for SemEval-2018, we use datasets from previous SemEvals during system construction. Our Pearson correlation score is 0.638 on the official SemEval-2018 Task 1 test set.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "In recent years, people began to study how to create computational systems that process and understand the human languages. Today, people share their thoughts on social networks of the Internet, e.g. Facebook, Line, Twitter and so on. Thus, if the messages in the textual contents of social networks can be extracted and summarized automatically via algorithms, it is possible to learn what people are interested in or are concerned with, and use such information to predict future market trends.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Here we continue our previous works on the task 4 of SemEval-2017: Sentiment Analysis in Twitter (Rosenthal et al., 2017) . SemEval-2017 subtask 4A is similar to task 1 of SemEval-2018 : Affect in Tweets (Mohammad et al., 2018 . They are challenging tasks as the messages on Twitter, called tweets, are short and informal. Furthermore, in addition to noisy or incomplete texts, the emotional content of a tweet can be ambiguous and subjective.", |
| "cite_spans": [ |
| { |
| "start": 97, |
| "end": 121, |
| "text": "(Rosenthal et al., 2017)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 172, |
| "end": 184, |
| "text": "SemEval-2018", |
| "ref_id": null |
| }, |
| { |
| "start": 185, |
| "end": 226, |
| "text": ": Affect in Tweets (Mohammad et al., 2018", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Affect in Tweets is an expanded version of the WASSA-2017 shared task (Mohammad and Bravo-Marquez, 2017).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The best system in WASSA-2017 is an ensemble of three sets of approaches, including feed-forward neural network, multi-task deep learning and sequence modeling using CNNs and LSTMs (Goel et al., 2017) . They attempt to use the idea of multi-task learning to explore the notion of generalized or shared learning across different emotions. In this paper, we extend the idea with different label methods. The rest of this paper is organized as follows. In Section 2, we introduce our system. In Section 3, we describe the details of training and experimental settings. In Section 4, we present the evaluation results along with our comments.", |
| "cite_spans": [ |
| { |
| "start": 181, |
| "end": 200, |
| "text": "(Goel et al., 2017)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Using RNN has become a very common technique for various NLP tasks. There are many units for RNN-based model like simple RNN, gated recurrent units (GRU) (Chung et al., 2014) , and long short-term memory (LSTM) (Hochreiter and Schmidhuber, 1997) . For the baseline, we use LSTM as unit for its long-range dependency. Figure 1 shows the architecture of our baseline system. Our baseline system contains an input layer, an embedding layer, Bi-LSTM layers and an output layer. At the input layer, the words of tweet are pre-processed, and they are treated as a sequence of words w 1 , w 2 , ...w n . Each word is represented by a one-hot vector, and the size of input layer is equal to the size of word list.", |
| "cite_spans": [ |
| { |
| "start": 154, |
| "end": 174, |
| "text": "(Chung et al., 2014)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 211, |
| "end": 245, |
| "text": "(Hochreiter and Schmidhuber, 1997)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 317, |
| "end": 325, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Baseline System", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "At the embedding layer, each word is converted to a word vector. We use pre-trained word vectors, which are stored in a matrix. Words are mapped to word vectors by the word embedding matrix. A word not in the word embedding matrix is represented by a zero vector.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baseline System", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "A Bi-LSTM layer contains h units. We use bidirectional (Schuster and Paliwal, 1997) structure to gather two-way contextual information at each point. The hidden states from the first word to the penultimate word in a tweet are connected to the hidden states of the next word. The state values in both directions are combined with sum. Only the last Bi-LSTM states of the last word are connected to the output layer. Finally, the network output is converted to probability by a soft-max function. ", |
| "cite_spans": [ |
| { |
| "start": 55, |
| "end": 83, |
| "text": "(Schuster and Paliwal, 1997)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baseline System", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Multi-task learning has been used with success in applications of machine learning, from natural language processing (Collobert and Weston, 2008) and speech recognition (Deng et al., 2013) . By sharing representations with related tasks, a model tends to generalize better on the original task (Ruder, 2017) . In this work, different labels for the same data are exploited in multi-task learning. Figure 2 shows our multi-task learning framework. The overall system is divided into five models. The Three-class model is trained first, and its trained parameters are used to initialize the parameters in other models. Then we train the Negative, Neutral, Positive class models, and their trained parameters are used to initialize the parameters of the Seven class model. The final output is obtained from the Seven class model. Three class model In Three class model, the tweets are converted to the word vector and used as the input to Bi-LSTM layer. The output layer has three units for three classes {\u22121, 0, 1}.", |
| "cite_spans": [ |
| { |
| "start": 117, |
| "end": 145, |
| "text": "(Collobert and Weston, 2008)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 169, |
| "end": 188, |
| "text": "(Deng et al., 2013)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 294, |
| "end": 307, |
| "text": "(Ruder, 2017)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 397, |
| "end": 405, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Multi-task Learning", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Negative class model The Negative class model has one more Bi-LSTM layer than Three class model. The output layer has four units for four classes {\u22123, \u22122, \u22121, other}.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-task Learning", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Neutral class model The Neutral class model has the same architecture as the Negative class model. The output layer has two units for two classes {0, other}.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-task Learning", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Positive class model The Positive class model has the same architecture as the Negative class model. The output layer has four units for four classes {other, 1, 2, 3}.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-task Learning", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Seven class model The Seven class model combines the Bi-LSTM layers of the Negative class, Neutral class, and Positive class models. Further, it has one additional Bi-LSTM layer. The output layer has seven units for seven classes {\u22123, \u22122, \u22121, 0, 1, 2, 3}. Note that attention mechanism (Luong et al., 2015; Wang et al., 2016) is incorporated in this model.", |
| "cite_spans": [ |
| { |
| "start": 286, |
| "end": 306, |
| "text": "(Luong et al., 2015;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 307, |
| "end": 325, |
| "text": "Wang et al., 2016)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-task Learning", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "We use the dataset provided for the SemEval-2018 shared task (Mohammad et al., 2018), which includes a new dataset and the datasets provided for SemEval-2017 (Rosenthal et al., 2017) . Table 1 summarizes the statistics of these datasets.", |
| "cite_spans": [ |
| { |
| "start": 158, |
| "end": 182, |
| "text": "(Rosenthal et al., 2017)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 185, |
| "end": 192, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The SemEval-2017 dataset consists of three-class data, which is different from the new SemEval-2018 dataset. In order to exploit SemEval-2017 dataset, we modify the data labels. In the baseline system, we change the label to \u00b11, \u00b12, or \u00b13. Adding a lot of data lead to imbalance problem, so we apply two methods of data balance. Method 1 is that adding data to positive and negative classes randomly such that they have same size respectively. Method 2 is that adding data to all classes randomly such that they have 3,000 tweets. Table 1 shows the numbers of data points after these different labeling methods. dataset -3 -2 -1 0 1 2 3 train-18 -129 249 78 341 167 92 125 1,181 train-17 -8,581 18,186 15, ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 620, |
| "end": 724, |
| "text": "-3 -2 -1 0 1 2 3 train-18 -129 249 78 341 167 92 125 1,181 train-17 -8,581 18,186 15,", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Different Labeling", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "labels Negative Neutral Positive total", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Different Labeling", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We begin with basic pre-processing methods (Yang et al., 2017) , e.g. splitting a tweet into word, replacing URLs and USERs with normalization patterns <URL> and <USER>, and converting uppercase letters to lowercase letters. As tweets are informal and complex, the basic preprocessing is too simple to convey enough important information. Tweets often have emoticons and hashtags, which could be instrumental to sentiment analysis. Thus, we use text processing tool 1 (Baziotis et al., 2017) to improve text normalization, including sentiment-aware tokenization, spell correction, word normalization, word segmentation (for splitting hashtags). and word annotation.", |
| "cite_spans": [ |
| { |
| "start": 43, |
| "end": 62, |
| "text": "(Yang et al., 2017)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pre-processing", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "The early stopping method is used to prevent overfitting when the loss of a development set ceases to decrease for a few epochs. We randomly take 20% of SemEval-2018 train data as the development set for early stopping and the remaining 80% data as the train set.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Early Stopping", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "The maximum length for any tweet in the used datasets is n = 99. The embedding is based on a publicly available set of word vectors learned from 400 million tweets for the ACL WNUT 2015 shared task (Baldwin et al., 2015 ).", |
| "cite_spans": [ |
| { |
| "start": 198, |
| "end": 219, |
| "text": "(Baldwin et al., 2015", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Settings", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "The baseline system uses 4 hidden Bi-LSTM layers, with 300 neurons in each layer. Dropout method with probability 0.3 is used to prevent the model from overfitting (Srivastava et al., 2014 ).", |
| "cite_spans": [ |
| { |
| "start": 164, |
| "end": 188, |
| "text": "(Srivastava et al., 2014", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Settings", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "1 github.com/cbaziotis/ekphrasis In the multi-task learning approach, the numbers of neurons in the Bi-LSTM and hidden layers are [200, 200] , [200, 150, 200] , [200, 150, 100] , [200, 150, 200] , [200, [150, 150, 150], 200, 200] for the 5 different class models, respectively.", |
| "cite_spans": [ |
| { |
| "start": 130, |
| "end": 135, |
| "text": "[200,", |
| "ref_id": null |
| }, |
| { |
| "start": 136, |
| "end": 140, |
| "text": "200]", |
| "ref_id": null |
| }, |
| { |
| "start": 143, |
| "end": 148, |
| "text": "[200,", |
| "ref_id": null |
| }, |
| { |
| "start": 149, |
| "end": 153, |
| "text": "150,", |
| "ref_id": null |
| }, |
| { |
| "start": 154, |
| "end": 158, |
| "text": "200]", |
| "ref_id": null |
| }, |
| { |
| "start": 161, |
| "end": 166, |
| "text": "[200,", |
| "ref_id": null |
| }, |
| { |
| "start": 167, |
| "end": 171, |
| "text": "150,", |
| "ref_id": null |
| }, |
| { |
| "start": 172, |
| "end": 176, |
| "text": "100]", |
| "ref_id": null |
| }, |
| { |
| "start": 179, |
| "end": 184, |
| "text": "[200,", |
| "ref_id": null |
| }, |
| { |
| "start": 185, |
| "end": 189, |
| "text": "150,", |
| "ref_id": null |
| }, |
| { |
| "start": 190, |
| "end": 194, |
| "text": "200]", |
| "ref_id": null |
| }, |
| { |
| "start": 197, |
| "end": 229, |
| "text": "[200, [150, 150, 150], 200, 200]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Settings", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "First, we compare the experiments of different labeling in baseline system to decide how to use the train-17 dataset. In baseline system, we use the basic pre-processing for text normalization. The results are shown in Table 2 . The calculation of Pearson correlation coefficient (Pcc.) requires calculating the mean value of the data, which is often close to zero. From the results, labeling to more distant from zero get the higher Pcc. Therefore, we use labeling to \u00b13 method in the multi-task learning system. Table 3 : Results of multi-task learning. Final row is the official SemEval-2018 test set result and others are development set results. Here * means using the ekphrasis tool for pre-processing and s-m means some-emotion.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 219, |
| "end": 226, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| }, |
| { |
| "start": 514, |
| "end": 521, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Baseline System", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "baseline system. When the basic pre-processing method is replaced by using ekphrasis tool, the performance is further improved. Finally, we submit the results from our best system for the unseen test set to SemEval-2018, getting 0.638 for Pcc. eventually. We note this is significantly lower than 0.691 on the development data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-task Learning System", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The proposed method improves performance on SemEval-2018 over baseline systems without multi-task learning. The external dataset can significantly improve the Pcc. performance, but not the Acc. performance. The possible reason is that all the labels of the external dataset are marked as \u00b13, resulting in a data imbalance problem. In the future, we will use skewness-robust weights to solve this problem and use more resources, such as sentiment lexicons, to improve the system.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Shared tasks of the 2015 workshop on noisy user-generated text: Twitter lexical normalization and named entity recognition", |
| "authors": [ |
| { |
| "first": "Timothy", |
| "middle": [], |
| "last": "Baldwin", |
| "suffix": "" |
| }, |
| { |
| "first": "Marie-Catherine", |
| "middle": [], |
| "last": "De Marneffe", |
| "suffix": "" |
| }, |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Han", |
| "suffix": "" |
| }, |
| { |
| "first": "Young-Bum", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [], |
| "last": "Ritter", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the Workshop on Noisy User-generated Text", |
| "volume": "", |
| "issue": "", |
| "pages": "126--135", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Timothy Baldwin, Marie-Catherine de Marneffe, Bo Han, Young-Bum Kim, Alan Ritter, and Wei Xu. 2015. Shared tasks of the 2015 workshop on noisy user-generated text: Twitter lexical normal- ization and named entity recognition. In Proceed- ings of the Workshop on Noisy User-generated Text, pages 126-135.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Datastories at semeval-2017 task 4: Deep lstm with attention for message-level and topic-based sentiment analysis", |
| "authors": [ |
| { |
| "first": "Christos", |
| "middle": [], |
| "last": "Baziotis", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikos", |
| "middle": [], |
| "last": "Pelekis", |
| "suffix": "" |
| }, |
| { |
| "first": "Christos", |
| "middle": [], |
| "last": "Doulkeridis", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 11th International Workshop on Semantic Evaluation (SemEval-2017)", |
| "volume": "", |
| "issue": "", |
| "pages": "747--754", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christos Baziotis, Nikos Pelekis, and Christos Doulk- eridis. 2017. Datastories at semeval-2017 task 4: Deep lstm with attention for message-level and topic-based sentiment analysis. In Proceedings of the 11th International Workshop on Semantic Evalu- ation (SemEval-2017), pages 747-754. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Empirical evaluation of gated recurrent neural networks on sequence modeling", |
| "authors": [ |
| { |
| "first": "Junyoung", |
| "middle": [], |
| "last": "Chung", |
| "suffix": "" |
| }, |
| { |
| "first": "Caglar", |
| "middle": [], |
| "last": "Gulcehre", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1412.3555" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Junyoung Chung, Caglar Gulcehre, KyungHyun Cho, and Yoshua Bengio. 2014. Empirical evaluation of gated recurrent neural networks on sequence model- ing. arXiv preprint arXiv:1412.3555.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "A unified architecture for natural language processing: Deep neural networks with multitask learning", |
| "authors": [ |
| { |
| "first": "Ronan", |
| "middle": [], |
| "last": "Collobert", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of the 25th international conference on Machine learning", |
| "volume": "", |
| "issue": "", |
| "pages": "160--167", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ronan Collobert and Jason Weston. 2008. A unified architecture for natural language processing: Deep neural networks with multitask learning. In Pro- ceedings of the 25th international conference on Machine learning, pages 160-167. ACM.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "New types of deep neural network learning for speech recognition and related applications: An overview", |
| "authors": [ |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Deng", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoffrey", |
| "middle": [], |
| "last": "Hinton", |
| "suffix": "" |
| }, |
| { |
| "first": "Brian", |
| "middle": [], |
| "last": "Kingsbury", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Acoustics, Speech and Signal Processing (ICASSP), 2013 IEEE International Conference on", |
| "volume": "", |
| "issue": "", |
| "pages": "8599--8603", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Li Deng, Geoffrey Hinton, and Brian Kingsbury. 2013. New types of deep neural network learning for speech recognition and related applications: An overview. In Acoustics, Speech and Signal Process- ing (ICASSP), 2013 IEEE International Conference on, pages 8599-8603. IEEE.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Prayas at emoint 2017: An ensemble of deep neural architectures for emotion intensity prediction in tweets", |
| "authors": [ |
| { |
| "first": "Pranav", |
| "middle": [], |
| "last": "Goel", |
| "suffix": "" |
| }, |
| { |
| "first": "Devang", |
| "middle": [], |
| "last": "Kulshreshtha", |
| "suffix": "" |
| }, |
| { |
| "first": "Prayas", |
| "middle": [], |
| "last": "Jain", |
| "suffix": "" |
| }, |
| { |
| "first": "Kaushal Kumar", |
| "middle": [], |
| "last": "Shukla", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 8th Workshop on Computational Approaches to Subjectivity, Sentiment and Social Media Analysis", |
| "volume": "", |
| "issue": "", |
| "pages": "58--65", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pranav Goel, Devang Kulshreshtha, Prayas Jain, and Kaushal Kumar Shukla. 2017. Prayas at emoint 2017: An ensemble of deep neural architectures for emotion intensity prediction in tweets. In Pro- ceedings of the 8th Workshop on Computational Ap- proaches to Subjectivity, Sentiment and Social Me- dia Analysis, pages 58-65.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Long short-term memory", |
| "authors": [ |
| { |
| "first": "Sepp", |
| "middle": [], |
| "last": "Hochreiter", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00fcrgen", |
| "middle": [], |
| "last": "Schmidhuber", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Neural computation", |
| "volume": "9", |
| "issue": "8", |
| "pages": "1735--1780", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural computation, 9(8):1735-1780.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Effective approaches to attentionbased neural machine translation", |
| "authors": [ |
| { |
| "first": "Minh-Thang", |
| "middle": [], |
| "last": "Luong", |
| "suffix": "" |
| }, |
| { |
| "first": "Hieu", |
| "middle": [], |
| "last": "Pham", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher D", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1508.04025" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Minh-Thang Luong, Hieu Pham, and Christopher D Manning. 2015. Effective approaches to attention- based neural machine translation. arXiv preprint arXiv:1508.04025.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "WASSA-2017 shared task on emotion intensity", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Saif", |
| "suffix": "" |
| }, |
| { |
| "first": "Felipe", |
| "middle": [], |
| "last": "Mohammad", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Bravo-Marquez", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the Workshop on Computational Approaches to Subjectivity, Sentiment and Social Media Analysis (WASSA)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Saif M. Mohammad and Felipe Bravo-Marquez. 2017. WASSA-2017 shared task on emotion intensity. In Proceedings of the Workshop on Computational Ap- proaches to Subjectivity, Sentiment and Social Me- dia Analysis (WASSA), Copenhagen, Denmark.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Semeval-2018 Task 1: Affect in tweets", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Saif", |
| "suffix": "" |
| }, |
| { |
| "first": "Felipe", |
| "middle": [], |
| "last": "Mohammad", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohammad", |
| "middle": [], |
| "last": "Bravo-Marquez", |
| "suffix": "" |
| }, |
| { |
| "first": "Svetlana", |
| "middle": [], |
| "last": "Salameh", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Kiritchenko", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of International Workshop on Semantic Evaluation (SemEval-2018)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Saif M. Mohammad, Felipe Bravo-Marquez, Mo- hammad Salameh, and Svetlana Kiritchenko. 2018. Semeval-2018 Task 1: Affect in tweets. In Proceed- ings of International Workshop on Semantic Evalu- ation (SemEval-2018), New Orleans, LA, USA.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "SemEval-2017 task 4: Sentiment analysis in Twitter", |
| "authors": [ |
| { |
| "first": "Sara", |
| "middle": [], |
| "last": "Rosenthal", |
| "suffix": "" |
| }, |
| { |
| "first": "Noura", |
| "middle": [], |
| "last": "Farra", |
| "suffix": "" |
| }, |
| { |
| "first": "Preslav", |
| "middle": [], |
| "last": "Nakov", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 11th International Workshop on Semantic Evaluation, SemEval '17", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sara Rosenthal, Noura Farra, and Preslav Nakov. 2017. SemEval-2017 task 4: Sentiment analysis in Twitter. In Proceedings of the 11th International Workshop on Semantic Evaluation, SemEval '17, Vancouver, Canada. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "An overview of multi-task learning in", |
| "authors": [ |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Ruder", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "deep neural networks", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1706.05098" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sebastian Ruder. 2017. An overview of multi-task learning in deep neural networks. arXiv preprint arXiv:1706.05098.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Bidirectional recurrent neural networks", |
| "authors": [ |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Schuster", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Kuldip", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Paliwal", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "IEEE Transactions on Signal Processing", |
| "volume": "45", |
| "issue": "11", |
| "pages": "2673--2681", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mike Schuster and Kuldip K Paliwal. 1997. Bidirec- tional recurrent neural networks. IEEE Transactions on Signal Processing, 45(11):2673-2681.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Skewness-Robust Neural Networks with Application to Speech Emotion Recognition", |
| "authors": [ |
| { |
| "first": "Po-Yuan", |
| "middle": [], |
| "last": "Shih", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Po-Yuan Shih. 2016. Skewness-Robust Neural Net- works with Application to Speech Emotion Recog- nition. Ph.D. thesis, Masters thesis, National Sun Yat-sen University.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Dropout: A simple way to prevent neural networks from overfitting", |
| "authors": [ |
| { |
| "first": "Nitish", |
| "middle": [], |
| "last": "Srivastava", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoffrey", |
| "middle": [], |
| "last": "Hinton", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Krizhevsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruslan", |
| "middle": [], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "The Journal of Machine Learning Research", |
| "volume": "15", |
| "issue": "1", |
| "pages": "1929--1958", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nitish Srivastava, Geoffrey Hinton, Alex Krizhevsky, Ilya Sutskever, and Ruslan Salakhutdinov. 2014. Dropout: A simple way to prevent neural networks from overfitting. The Journal of Machine Learning Research, 15(1):1929-1958.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Attention-based lstm for aspect-level sentiment classification", |
| "authors": [ |
| { |
| "first": "Yequan", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Minlie", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "606--615", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yequan Wang, Minlie Huang, Li Zhao, et al. 2016. Attention-based lstm for aspect-level sentiment clas- sification. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Process- ing, pages 606-615.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "deepsa at semeval-2017 task 4: Interpolated deep neural networks for sentiment analysis in twitter", |
| "authors": [ |
| { |
| "first": "Tzu-Hsuan", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Tzu-Hsuan", |
| "middle": [], |
| "last": "Tseng", |
| "suffix": "" |
| }, |
| { |
| "first": "Chia-Ping", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 11th International Workshop on Semantic Evaluation (SemEval-2017)", |
| "volume": "", |
| "issue": "", |
| "pages": "616--620", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tzu-Hsuan Yang, Tzu-Hsuan Tseng, and Chia-Ping Chen. 2017. deepsa at semeval-2017 task 4: Inter- polated deep neural networks for sentiment analysis in twitter. In Proceedings of the 11th International Workshop on Semantic Evaluation (SemEval-2017), pages 616-620. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Nnembs at semeval-2017 task 4: Neural twitter sentiment classification: a simple ensemble method with different embeddings", |
| "authors": [ |
| { |
| "first": "Yichun", |
| "middle": [], |
| "last": "Yin", |
| "suffix": "" |
| }, |
| { |
| "first": "Yangqiu", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 11th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "621--625", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yichun Yin, Yangqiu Song, and Ming Zhang. 2017. Nnembs at semeval-2017 task 4: Neural twitter sen- timent classification: a simple ensemble method with different embeddings. In Proceedings of the 11th International Workshop on Semantic Evalua- tion (SemEval-2017), pages 621-625.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "uris": null, |
| "text": "LSTM-RNN architecture.", |
| "type_str": "figure" |
| }, |
| "FIGREF1": { |
| "num": null, |
| "uris": null, |
| "text": "Multi-task learning of sentiment classification.", |
| "type_str": "figure" |
| }, |
| "TABREF1": { |
| "content": "<table/>", |
| "num": null, |
| "text": "Statistics of our different labeling methods and datasets. train-18 and dev-18 are from SemEval-2018 Task 1. train-17 is from SemEval-2017 task 4. train-all means the merger of the train-18 and train-17 datasets.", |
| "html": null, |
| "type_str": "table" |
| }, |
| "TABREF3": { |
| "content": "<table/>", |
| "num": null, |
"text": "Results of different labeling. Pcc. means the Pearson correlation coefficient (all classes). Acc. means the accuracy.",
| "html": null, |
| "type_str": "table" |
| }, |
| "TABREF4": { |
| "content": "<table><tr><td>shows the results of multi-task learning.</td></tr><tr><td>With basic pre-processing for text normalization,</td></tr><tr><td>the multi-task learning system is better than the</td></tr></table>", |
| "num": null, |
| "text": "", |
| "html": null, |
| "type_str": "table" |
| } |
| } |
| } |
| } |