| { |
| "paper_id": "S19-2008", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:46:25.650428Z" |
| }, |
| "title": "Atalaya at SemEval 2019 Task 5: Robust Embeddings for Tweet Classification", |
| "authors": [ |
| { |
| "first": "Juan", |
| "middle": [ |
| "Manuel" |
| ], |
| "last": "P\u00e9rez", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Universidad de Buenos Aires", |
| "location": {} |
| }, |
| "email": "jmperez@dc.uba.ar" |
| }, |
| { |
| "first": "Franco", |
| "middle": [ |
| "M" |
| ], |
| "last": "Luque", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Universidad Nacional de C\u00f3rdoba CONICET", |
| "location": {} |
| }, |
| "email": "francolq@famaf.unc.edu.ar" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "In this article, we describe our participation in HatEval, a shared task aimed at the detection of hate speech against immigrants and women. We focused on Spanish subtasks, building from our previous experiences on sentiment analysis in this language. We trained linear classifiers and Recurrent Neural Networks, using classic features, such as bag-of-words, bag-of-characters, and word embeddings, and also with recent techniques such as contextualized word representations. In particular, we trained robust task-oriented subword-aware embeddings and computed tweet representations using a weighted-averaging strategy. In the final evaluation, our systems showed competitive results for both Spanish subtasks ES-A and ES-B, achieving the first and fourth places respectively.", |
| "pdf_parse": { |
| "paper_id": "S19-2008", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "In this article, we describe our participation in HatEval, a shared task aimed at the detection of hate speech against immigrants and women. We focused on Spanish subtasks, building from our previous experiences on sentiment analysis in this language. We trained linear classifiers and Recurrent Neural Networks, using classic features, such as bag-of-words, bag-of-characters, and word embeddings, and also with recent techniques such as contextualized word representations. In particular, we trained robust task-oriented subword-aware embeddings and computed tweet representations using a weighted-averaging strategy. In the final evaluation, our systems showed competitive results for both Spanish subtasks ES-A and ES-B, achieving the first and fourth places respectively.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Hate speech against women, immigrants, and many other groups is a pervasive phenomenon on the Internet. On the early days of the World Wide Web, many academics adventured that prejudices and hatred would be removed in this space by the dissolution of identities (L\u00e9vy, 2001; Rheingold, 1993) . Twenty years after this hypothesis, we can say that it has not been the case. The prevalence of racism in the \"World White Web\" has been studied in a number of works (Adams and Roscigno, 2005; Kettrey and Laster, 2014) and so has been the misogyny in the virtual world (Filipovic, 2007; Mantilla, 2013) .", |
| "cite_spans": [ |
| { |
| "start": 262, |
| "end": 274, |
| "text": "(L\u00e9vy, 2001;", |
| "ref_id": null |
| }, |
| { |
| "start": 275, |
| "end": 291, |
| "text": "Rheingold, 1993)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 460, |
| "end": 486, |
| "text": "(Adams and Roscigno, 2005;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 487, |
| "end": 512, |
| "text": "Kettrey and Laster, 2014)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 563, |
| "end": 580, |
| "text": "(Filipovic, 2007;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 581, |
| "end": 596, |
| "text": "Mantilla, 2013)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Racist and sexist discourse are a constant in social media, but peaks are documented after \"trigger\" events, such as murders with religious or political reasons (Burnap and Williams, 2015) . Most social media companies are concerned about this issue and take actions against it; nonetheless, most of the efforts still need human intervention, making this task very expensive. Therefore, reducing human intervention is vital in order to have effective tools to avoid the escalation of hate speech.", |
| "cite_spans": [ |
| { |
| "start": 161, |
| "end": 188, |
| "text": "(Burnap and Williams, 2015)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "HatEval (Basile et al., 2019 ) is a SemEval-2019 shared task aimed at the detection of hate speech towards immigrants and women in tweets. It comprises two subtasks, with datasets in English (EN) and Spanish (ES) for both of them, giving a total of four subtasks. Subtask A is the binary classification of tweets into hateful or not hateful (HS). Subtask B is a triple binary classification task where, in addition to HS, tweets are classified into aggressive or not aggressive (AG), and targets of hate speech are classified into single humans or groups of persons (TR).", |
| "cite_spans": [ |
| { |
| "start": 8, |
| "end": 28, |
| "text": "(Basile et al., 2019", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 191, |
| "end": 195, |
| "text": "(EN)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this article, we present our participation in HatEval as team Atalaya. We focused our efforts on subtask A for Spanish (ES-A) but also worked at subtask B in Spanish (ES-B) and subtask A in English (EN-A). Our systems are based on our participation in the polarity classification task of Spanish tweets TASS 2018 (Sentiment Analysis at SEPLN) (Mart\u00ednez-C\u00e1mara et al., 2018; Luque and P\u00e9rez, 2018) .", |
| "cite_spans": [ |
| { |
| "start": 346, |
| "end": 376, |
| "text": "(Mart\u00ednez-C\u00e1mara et al., 2018;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 377, |
| "end": 399, |
| "text": "Luque and P\u00e9rez, 2018)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "To represent tweets, we experimented with a mixed approach of bag-of-words, bag-of-characters and tweet embeddings, which were calculated from word vectors using different averaging schemes. We used fastText (Bojanowski et al., 2016) to get subword-aware representations specifically trained for sentiment analysis tasks.", |
| "cite_spans": [ |
| { |
| "start": 208, |
| "end": 233, |
| "text": "(Bojanowski et al., 2016)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "These word representations are robust to noise since they can be computed for unseen words by using subword embeddings. Moreover, we trained them using a database of 90M tweets from various Spanish-speaking countries, giving wide domainspecific vocabulary coverage. We achieved additional robustness by doing preprocessing using several text-normalization and noise-reduction techniques.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Also, we experimented with ELMo (Peters et al., 2018) , a deep contextualized word representation that has drawn a lot of attention in the last months. Unlike fastText, ELMo returns context-dependent embeddings from a multi-layer bidirectional-LSTM language model. These representations improved the state-of-the-art of several NLP tasks. For the neural approach, we used bidirectional LSTMs to combine the word embeddings. We also did experiments that mix sequential models with complementary representations such as bagof-words.", |
| "cite_spans": [ |
| { |
| "start": 32, |
| "end": 53, |
| "text": "(Peters et al., 2018)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The rest of the paper is as follows. Next Section presents the primary tools we used to build our systems. Section 3 presents the configuration and development of both linear and neural models. Section 4 briefly shows our results in the competition, and Section 5 concludes the work with some observations about our experience.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The detection of hate speech is a sentence classification task quite related to sentiment analysis and has been studied for several social media networks (Thelwall, 2008; Pak and Paroubek, 2010; Saleem et al., 2017) . Regarding the detection of hateful content, Greevy and Smeaton (2004) used bag-ofwords and SVMs to detect racist content in web pages. Following a similar approach, Warner and Hirschberg (2012) used unigrams and Brown clusters with SVMs to detect anti-semitic messages on Twitter.", |
| "cite_spans": [ |
| { |
| "start": 154, |
| "end": 170, |
| "text": "(Thelwall, 2008;", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 171, |
| "end": 194, |
| "text": "Pak and Paroubek, 2010;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 195, |
| "end": 215, |
| "text": "Saleem et al., 2017)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 262, |
| "end": 287, |
| "text": "Greevy and Smeaton (2004)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Previous Work", |
| "sec_num": "1.1" |
| }, |
| { |
| "text": "Waseem and Hovy (2016) annotated a corpus and used character n-grams to detect hateful comments, and Badjatiya et al. (2017) used the same dataset to train deep learning models and finetuned embeddings along with Gradient Boosted Trees. Zhang et al. (2018) trained a deep neural network combining CNNs with Gated-recurrent units (Cho et al., 2014) , outperforming previous systems in several datasets. collected a corpus of misogynous tweets and proposed a taxonomy to distinguish them into different categories. The authors proposed a number of different techniques to classify them, showing that simple approaches (as using linear models along with token n-grams) achieve competitive performance on small-sized datasets.", |
| "cite_spans": [ |
| { |
| "start": 101, |
| "end": 124, |
| "text": "Badjatiya et al. (2017)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 237, |
| "end": 256, |
| "text": "Zhang et al. (2018)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 329, |
| "end": 347, |
| "text": "(Cho et al., 2014)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Previous Work", |
| "sec_num": "1.1" |
| }, |
| { |
| "text": "Regarding shared tasks, Fersini et al. (2018a) presented a challenge on misogyny detection on Twitter -both in Spanish and English-whereas Fersini et al. (2018b) posed a similar challenge but in Italian and English. Bosco et al. (2018) proposed an automatic detection contest over Twitter posts and Facebook comments, comprising general hate speech.", |
| "cite_spans": [ |
| { |
| "start": 216, |
| "end": 235, |
| "text": "Bosco et al. (2018)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Previous Work", |
| "sec_num": "1.1" |
| }, |
| { |
| "text": "Preprocessing is crucial in NLP applications, especially when working with noisy user-generated data. Here, we followed Luque and P\u00e9rez (2018), defining two levels of preprocessing: basic and sentiment-oriented preprocessing. We used one or the other, depending on the configuration.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Preprocessing", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Basic tweet preprocessing includes tokenization, replacement of handles, URLs, and e-mails, and shortening of repeated letters.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Preprocessing", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Sentiment-oriented preprocessing includes lowercasing, removal of punctuation, stopword, and numbers, lemmatization -using TreeTagger (Schmid, 1995) -and negation handling. For negation handling, we followed a simple approach: We find negation words and add the prefix 'NOT ' to the following tokens. Up to three tokens are negated, or less if a non-word token is found.", |
| "cite_spans": [ |
| { |
| "start": 134, |
| "end": 148, |
| "text": "(Schmid, 1995)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Preprocessing", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "The simplest approach considered to build tweet representations was bag-of-words encoding. A bag-of-words (BoW) builds feature vectors for each token seen in training data. For a particular tweet, its BoW vector contains the number of occurrences of each token on it, resulting in high-dimensional and sparse vectors. Variations of BoW include counting not only single tokens but also n-grams of tokens, binarizing counts, and limiting the number of features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Bags of Words and Characters", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Character usage in tweets may also hold useful information for sentiment analysis. Character n-grams -such as the presence and repetition of uppercase letters, emoticons, and exclamation marks-may indicate a strong presence of sentiment of some kind, where others may indicate a more formal writing style, and therefore an absence of sentiment.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Bags of Words and Characters", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "To capture this information, we considered a bag-of-characters (BoC) representation that encodes counts of character n-grams for some values of n. These vectors are computed from original texts of tweets, with no preprocessing at all. BoCs have the same variants and parameters as BoWs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Bags of Words and Characters", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "We used fastText, a subword-aware embeddings library (Bojanowski et al., 2016) to get context-independent word representations. Instead of using publicly available pre-trained vectors, we trained our own embeddings on a dataset of \u223c 90 million tweets from various Spanish-speaking countries. We prepared two versions of the data: one using only basic preprocessing, and the other using sentiment-oriented preprocessing (excepting lemmatization). For these two datasets, skip-gram embeddings were trained using different parameter configurations, including a number of dimensions, size of word and subword n-grams, and size of context window.", |
| "cite_spans": [ |
| { |
| "start": 53, |
| "end": 78, |
| "text": "(Bojanowski et al., 2016)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Embeddings", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "Linear combinations were used to compute a representation for a single tweet. We followed two simple approaches: plain average and weighted average. In the second case, we used a scheme that resembles Smooth Inverse Frequency (SIF) (Arora et al., 2017) , inspired by TF-IDF reweighting. Each word w is weighted with a/(a + p(w)), where p(w) is the word unigram probability, and a is a smoothing hyper-parameter. Big values of a mean more smoothing towards plain averaging.", |
| "cite_spans": [ |
| { |
| "start": 232, |
| "end": 252, |
| "text": "(Arora et al., 2017)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tweet Embeddings", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "After the great leap forward that represented context-independent word embeddings, a new wave came in the last years. Instead of having vectors trained for each word, context-dependent representations are generated for each token given a sentence. For instance, McCann et al. (2017) used a deep LSTM encoder for Machine Translation to generate context-aware vectors.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Context-Dependent Embeddings", |
| "sec_num": "2.5" |
| }, |
| { |
| "text": "ELMo (Peters et al., 2018) is one of these context-dependent approaches and is based on a deep bidirectional language model (biLM). The architecture of the language model consists of L layers of bidirectional LSTMs, plus a contextindependent token representation. Hence, for each token in a sequence, we get 2L + 1 vector representations. To obtain a final vector for each token, the authors suggest collapsing the layers into vectors by means of a linear combination.", |
| "cite_spans": [ |
| { |
| "start": 5, |
| "end": 26, |
| "text": "(Peters et al., 2018)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Context-Dependent Embeddings", |
| "sec_num": "2.5" |
| }, |
| { |
| "text": "In this work, we used the implementation and pre-trained models from Che et al. (2018) . The Spanish model was trained with L = 2 layers and 1024 dimensions, and the linear combination was done using a simple average.", |
| "cite_spans": [ |
| { |
| "start": 69, |
| "end": 86, |
| "text": "Che et al. (2018)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Context-Dependent Embeddings", |
| "sec_num": "2.5" |
| }, |
| { |
| "text": "In this section, we describe the models we used in the competition.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Models", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The first set of models we trained were simple classifying models implemented with scikit-learn (Pedregosa et al., 2011) .", |
| "cite_spans": [ |
| { |
| "start": 96, |
| "end": 120, |
| "text": "(Pedregosa et al., 2011)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Linear Classifiers", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We started from the optimal configuration from Luque and P\u00e9rez (2018), that combines bag-ofwords (BoW), bag-of-characters (BoC) and tweet embeddings as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Linear Classifiers", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "\u2022 BoW: All unigrams and bigrams of words, with binarized counts and TF-IDF reweighting. For the Spanish training dataset, this encoding gives 53504 sparse features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Linear Classifiers", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "\u2022 BoC: All n-grams of characters for n \u2264 5, with binarized counts and TF-IDF reweighting. For the Spanish training dataset, it gives 226156 sparse features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Linear Classifiers", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "\u2022 Tweet embeddings: Computed from fastText sentiment-oriented word vectors of 50 dimensions. Weighted averaging was done as described in Section 2.4, with a smoothing value of a = 0.1.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Linear Classifiers", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Here, the only parameters specifically optimized using the HatEval development set were the ngram ranges considered for BoW and BoC. Using this vectorial representation we trained logistic regressions and linear-kernel SVMs with different hyperparameter values. The best results are shown in the first block of Tab. 1, as LR 0 and SVM 0 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Linear Classifiers", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Next, to confirm the relevance of each of the three components, we performed ablation tests for each of them. Results are displayed as SVM BoW , SVM BoC and SVM emb in Tab. 1. Drops in the performance show the relevance of all components, especially for BoW and BoC.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Linear Classifiers", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Next, we tried adding tweet representations computed from ELMo vectors. Full tweet vectors were obtained by doing simple un-weighted averaging. PCA was optionally used to reduce the dimension of final vectors. The best results were obtained using PCA to reduce from the original 1024 to 100 dimensions. Results are shown as SVM ELMo in Tab. 1. It can be seen that, under this configuration, we are not able to improve our results using ELMo.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Linear Classifiers", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "To participate in the Spanish subtask B (ES-B) we used a very naive approach. We didn't develop or tune a specific system for this subtask but instead used the same system and configuration that was found optimal for subtask A. To do this, we first mapped the triple classification problem to a 5-way classification problem for all the possible label combinations:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Linear Classifiers", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "HS AG TR 0 0 0 1 0 0 1 0 1 1 1 0 1 1 1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Linear Classifiers", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Then, we simply trained the classifier using the Spanish subtask B training dataset.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Linear Classifiers", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The second set of models we trained are neural models. We trained Recurrent Neural Networks (RNNs) using pre-trained context-dependent representations for Spanish. The first model considered was a bidirectional LSTM with a dense layer on top, consuming ELMo vectors; we call this model LSTM-ELMo. Also, we tried another model by adding a second input consisting of a bag-of-words, as illustrated in Figure 1 . We call this model LSTM-ELMo+BoW. Using fastText embeddings (of dimension 300 and context window 5) instead of BoW was considered as suggested by Peters et al. (2018) but discarded as it had no positive impact in performance (in the development dataset).", |
| "cite_spans": [ |
| { |
| "start": 556, |
| "end": 576, |
| "text": "Peters et al. (2018)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 399, |
| "end": 407, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Neural Models", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The biLSTM layer consists of 256 units. The bag-of-words has the 3500 most-frequent n-grams (having document-frequency less than 0.65), followed by a 512-unit dense layer. The two last dense layers have 64 neurons.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Neural Models", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We used Keras (Chollet et al., 2015) to implement and train our models. Adam (Kingma and Ba, 2014) was the chosen optimizer, with lr = 35 * 10^\u22125 and decay = 0.01. To regularize our models, we applied dropout with keep-prob of 0.2 on the first layer, and 0.45 on the second, and we also early-stopped the training monitoring the performance on the development dataset. The hyperparameters were chosen from a small random search, as training ELMo is computationally expensive. Table 2 displays the evaluation results for the three classifiers trained for subtask A: SVM 0 , and both neural models LSTM-ELMo and LSTM-ELMo+BoW. For Spanish, the best performing system was SVM 0 . Despite its simplicity, it ranked first in terms of average F1 in the official results. Among the neural models, LSTM-ELMo+BoW performed best, and ranked in position 17 for Spanish in terms of average F1. 1 We can observe that LSTM-ELMo+BoW performs better on the development set, although its performance decreases sharply in the test set. In spite of the applied regularization, we might have incurred in overfitting during model selection (Cawley and Talbot, 2010) as the chosen model has higher variance than LSTM-ELMo. This last model achieved similar results to SVM 0 . This difference between the models was not seen in English. For the Spanish subtask B (ES-B), the same SVM 0 system was used, achieving an average F1 of 0.758 and an EMR score of 0.657 over the test set (fourth place in terms of EMR).", |
| "cite_spans": [ |
| { |
| "start": 14, |
| "end": 36, |
| "text": "(Chollet et al., 2015)", |
| "ref_id": null |
| }, |
| { |
| "start": 882, |
| "end": 883, |
| "text": "1", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 476, |
| "end": 483, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Neural Models", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "As in our previous experience with sentiment analysis, we found that linear models can be a match for neural models. Moreover, this time our SVM ranked in the first place in one of the subtasks.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and future work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We believe that -for this kind of challenges with small-sized datasets-preprocessing techniques, data normalization and robustness play a stronger role than model design and hyperparameter tuning. On the other hand, deep neural models are highly expressive and prone to overfitting, requiring being extremely careful with regularization.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and future work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Results shown in Tab. 2 differ from the ones in the leaderboard as we couldn't exactly reproduce the experiments.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We are grateful to Pablo Brusco for providing us with helpful comments. This material is based upon work supported by the Air Force Office of Scientific Research under award number FA9550-18-1-0026, and also by a research grant from Se-CyT, Universidad Nacional de C\u00f3rdoba.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "White supremacists, oppositional culture and the world wide web", |
| "authors": [ |
| { |
| "first": "Josh", |
| "middle": [], |
| "last": "Adams", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Vincent", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Roscigno", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Social Forces", |
| "volume": "84", |
| "issue": "2", |
| "pages": "759--778", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Josh Adams and Vincent J Roscigno. 2005. White supremacists, oppositional culture and the world wide web. Social Forces, 84(2):759-778.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Automatic identification and classification of misogynistic language on twitter", |
| "authors": [ |
| { |
| "first": "Maria", |
| "middle": [], |
| "last": "Anzovino", |
| "suffix": "" |
| }, |
| { |
| "first": "Elisabetta", |
| "middle": [], |
| "last": "Fersini", |
| "suffix": "" |
| }, |
| { |
| "first": "Paolo", |
| "middle": [], |
| "last": "Rosso", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "International Conference on Applications of Natural Language to Information Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "57--64", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Maria Anzovino, Elisabetta Fersini, and Paolo Rosso. 2018. Automatic identification and classification of misogynistic language on twitter. In International Conference on Applications of Natural Language to Information Systems, pages 57-64. Springer.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "A simple but tough-to-beat baseline for sentence embeddings", |
| "authors": [ |
| { |
| "first": "Sanjeev", |
| "middle": [], |
| "last": "Arora", |
| "suffix": "" |
| }, |
| { |
| "first": "Yingyu", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| }, |
| { |
| "first": "Tengyu", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sanjeev Arora, Yingyu Liang, and Tengyu Ma. 2017. A simple but tough-to-beat baseline for sentence em- beddings. In International Conference on Learning Representations.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Deep learning for hate speech detection in tweets", |
| "authors": [ |
| { |
| "first": "Pinkesh", |
| "middle": [], |
| "last": "Badjatiya", |
| "suffix": "" |
| }, |
| { |
| "first": "Shashank", |
| "middle": [], |
| "last": "Gupta", |
| "suffix": "" |
| }, |
| { |
| "first": "Manish", |
| "middle": [], |
| "last": "Gupta", |
| "suffix": "" |
| }, |
| { |
| "first": "Vasudeva", |
| "middle": [], |
| "last": "Varma", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 26th International Conference on World Wide Web Companion", |
| "volume": "", |
| "issue": "", |
| "pages": "759--760", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pinkesh Badjatiya, Shashank Gupta, Manish Gupta, and Vasudeva Varma. 2017. Deep learning for hate speech detection in tweets. In Proceedings of the 26th International Conference on World Wide Web Companion, pages 759-760. International World Wide Web Conferences Steering Committee.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Semeval-2019 task 5: Multilingual detection of hate speech against immigrants and women in twitter", |
| "authors": [ |
| { |
| "first": "Valerio", |
| "middle": [], |
| "last": "Basile", |
| "suffix": "" |
| }, |
| { |
| "first": "Cristina", |
| "middle": [], |
| "last": "Bosco", |
| "suffix": "" |
| }, |
| { |
| "first": "Elisabetta", |
| "middle": [], |
| "last": "Fersini", |
| "suffix": "" |
| }, |
| { |
| "first": "Debora", |
| "middle": [], |
| "last": "Nozza", |
| "suffix": "" |
| }, |
| { |
| "first": "Viviana", |
| "middle": [], |
| "last": "Patti", |
| "suffix": "" |
| }, |
| { |
| "first": "Francisco", |
| "middle": [], |
| "last": "Rangel", |
| "suffix": "" |
| }, |
| { |
| "first": "Paolo", |
| "middle": [], |
| "last": "Rosso", |
| "suffix": "" |
| }, |
| { |
| "first": "Manuela", |
| "middle": [], |
| "last": "Sanguinetti", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 13th International Workshop on Semantic Evaluation (SemEval-2019)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Valerio Basile, Cristina Bosco, Elisabetta Fersini, Deb- ora Nozza, Viviana Patti, Francisco Rangel, Paolo Rosso, and Manuela Sanguinetti. 2019. Semeval- 2019 task 5: Multilingual detection of hate speech against immigrants and women in twitter. In Pro- ceedings of the 13th International Workshop on Se- mantic Evaluation (SemEval-2019). Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Enriching word vectors with subword information", |
| "authors": [ |
| { |
| "first": "Piotr", |
| "middle": [], |
| "last": "Bojanowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Edouard", |
| "middle": [], |
| "last": "Grave", |
| "suffix": "" |
| }, |
| { |
| "first": "Armand", |
| "middle": [], |
| "last": "Joulin", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1607.04606" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov. 2016. Enriching word vec- tors with subword information. arXiv preprint arXiv:1607.04606.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Overview of the evalita 2018 hate speech detection task", |
| "authors": [ |
| { |
| "first": "Cristina", |
| "middle": [], |
| "last": "Bosco", |
| "suffix": "" |
| }, |
| { |
| "first": "Fabio", |
| "middle": [], |
| "last": "Dell'orletta Felice", |
| "suffix": "" |
| }, |
| { |
| "first": "Manuela", |
| "middle": [], |
| "last": "Poletto", |
| "suffix": "" |
| }, |
| { |
| "first": "Tesconi", |
| "middle": [], |
| "last": "Sanguinetti", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Maurizio", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "EVALITA 2018-Sixth Evaluation Campaign of Natural Language Processing and Speech Tools for Italian", |
| "volume": "2263", |
| "issue": "", |
| "pages": "1--9", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Cristina Bosco, Dell'Orletta Felice, Fabio Poletto, Manuela Sanguinetti, and Tesconi Maurizio. 2018. Overview of the evalita 2018 hate speech detection task. In EVALITA 2018-Sixth Evaluation Campaign of Natural Language Processing and Speech Tools for Italian, volume 2263, pages 1-9. CEUR.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Cyber hate speech on twitter: An application of machine classification and statistical modeling for policy and decision making", |
| "authors": [ |
| { |
| "first": "Pete", |
| "middle": [], |
| "last": "Burnap", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Matthew", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Williams", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Policy & Internet", |
| "volume": "7", |
| "issue": "2", |
| "pages": "223--242", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pete Burnap and Matthew L Williams. 2015. Cyber hate speech on twitter: An application of machine classification and statistical modeling for policy and decision making. Policy & Internet, 7(2):223-242.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "On overfitting in model selection and subsequent selection bias in performance evaluation", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Gavin", |
| "suffix": "" |
| }, |
| { |
| "first": "Nicola Lc", |
| "middle": [], |
| "last": "Cawley", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Talbot", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "11", |
| "issue": "", |
| "pages": "2079--2107", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gavin C Cawley and Nicola LC Talbot. 2010. On over- fitting in model selection and subsequent selection bias in performance evaluation. Journal of Machine Learning Research, 11(Jul):2079-2107.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Towards better UD parsing: Deep contextualized word embeddings, ensemble, and treebank concatenation", |
| "authors": [ |
| { |
| "first": "Wanxiang", |
| "middle": [], |
| "last": "Che", |
| "suffix": "" |
| }, |
| { |
| "first": "Yijia", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuxuan", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Zheng", |
| "suffix": "" |
| }, |
| { |
| "first": "Ting", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the CoNLL 2018 Shared Task: Multilingual Parsing from Raw Text to Universal Dependencies", |
| "volume": "", |
| "issue": "", |
| "pages": "55--64", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wanxiang Che, Yijia Liu, Yuxuan Wang, Bo Zheng, and Ting Liu. 2018. Towards better UD parsing: Deep contextualized word embeddings, ensemble, and treebank concatenation. In Proceedings of the CoNLL 2018 Shared Task: Multilingual Parsing from Raw Text to Universal Dependencies, pages 55-64, Brussels, Belgium. Association for Compu- tational Linguistics.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Learning phrase representations using rnn encoder-decoder for statistical machine translation", |
| "authors": [ |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Bart", |
| "middle": [], |
| "last": "Van Merri\u00ebnboer", |
| "suffix": "" |
| }, |
| { |
| "first": "Caglar", |
| "middle": [], |
| "last": "Gulcehre", |
| "suffix": "" |
| }, |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Fethi", |
| "middle": [], |
| "last": "Bougares", |
| "suffix": "" |
| }, |
| { |
| "first": "Holger", |
| "middle": [], |
| "last": "Schwenk", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1406.1078" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kyunghyun Cho, Bart Van Merri\u00ebnboer, Caglar Gul- cehre, Dzmitry Bahdanau, Fethi Bougares, Holger Schwenk, and Yoshua Bengio. 2014. Learning phrase representations using rnn encoder-decoder for statistical machine translation. arXiv preprint arXiv:1406.1078.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Overview of the task on automatic misogyny identification at ibereval", |
| "authors": [ |
| { |
| "first": "Elisabetta", |
| "middle": [], |
| "last": "Fersini", |
| "suffix": "" |
| }, |
| { |
| "first": "Maria", |
| "middle": [], |
| "last": "Anzovino", |
| "suffix": "" |
| }, |
| { |
| "first": "Paolo", |
| "middle": [], |
| "last": "Rosso", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Third Workshop on Evaluation of Human Language Technologies for Iberian Languages", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Elisabetta Fersini, Maria Anzovino, and Paolo Rosso. 2018a. Overview of the task on automatic misog- yny identification at ibereval. In Proceedings of the Third Workshop on Evaluation of Hu- man Language Technologies for Iberian Languages (IberEval 2018), co-located with 34th Conference of the Spanish Society for Natural Language Process- ing (SEPLN 2018). CEUR Workshop Proceedings. CEUR-WS. org, Seville, Spain.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Proceedings of the 6th evaluation campaign of Natural Language Processing and Speech tools for Italian (EVALITA'18)", |
| "authors": [ |
| { |
| "first": "Elisabetta", |
| "middle": [], |
| "last": "Fersini", |
| "suffix": "" |
| }, |
| { |
| "first": "Debora", |
| "middle": [], |
| "last": "Nozza", |
| "suffix": "" |
| }, |
| { |
| "first": "Paolo", |
| "middle": [], |
| "last": "Rosso", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Elisabetta Fersini, Debora Nozza, and Paolo Rosso. 2018b. Overview of the evalita 2018 task on au- tomatic misogyny identification (ami). Proceed- ings of the 6th evaluation campaign of Natural Language Processing and Speech tools for Italian (EVALITA'18), Turin, Italy. CEUR. org.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Blogging while female: How internet misogyny parallels real-world harassment", |
| "authors": [ |
| { |
| "first": "Jill", |
| "middle": [], |
| "last": "Filipovic", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Yale JL & Feminism", |
| "volume": "19", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jill Filipovic. 2007. Blogging while female: How internet misogyny parallels real-world harassment. Yale JL & Feminism, 19:295.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Classifying racist texts using a support vector machine", |
| "authors": [ |
| { |
| "first": "Edel", |
| "middle": [], |
| "last": "Greevy", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [ |
| "F" |
| ], |
| "last": "Smeaton", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the 27th annual international ACM SI-GIR conference on Research and development in information retrieval", |
| "volume": "", |
| "issue": "", |
| "pages": "468--469", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Edel Greevy and Alan F Smeaton. 2004. Classifying racist texts using a support vector machine. In Pro- ceedings of the 27th annual international ACM SI- GIR conference on Research and development in in- formation retrieval, pages 468-469. ACM.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Staking territory in the \"world white web\" an exploration of the roles of overt and color-blind racism in maintaining racial boundaries on a popular web site", |
| "authors": [ |
| { |
| "first": "Whitney", |
| "middle": [ |
| "Nicole" |
| ], |
| "last": "Heather Hensman Kettrey", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Laster", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Social Currents", |
| "volume": "1", |
| "issue": "3", |
| "pages": "257--274", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Heather Hensman Kettrey and Whitney Nicole Laster. 2014. Staking territory in the \"world white web\" an exploration of the roles of overt and color-blind racism in maintaining racial boundaries on a popular web site. Social Currents, 1(3):257-274.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Adam: A method for stochastic optimization", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Diederik", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1412.6980" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik P Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Atalaya at TASS 2018: Sentiment analysis with tweet embeddings and data augmentation", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Franco", |
| "suffix": "" |
| }, |
| { |
| "first": "Juan", |
| "middle": [ |
| "Manuel" |
| ], |
| "last": "Luque", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "P\u00e9rez", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of TASS 2018: Workshop on Semantic Analysis at SEPLN, TASS@SEPLN 2018, co-located with 34nd SEPLN Conference (SEPLN 2018)", |
| "volume": "", |
| "issue": "", |
| "pages": "29--35", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Franco M. Luque and Juan Manuel P\u00e9rez. 2018. Ata- laya at TASS 2018: Sentiment analysis with tweet embeddings and data augmentation. In Proceedings of TASS 2018: Workshop on Semantic Analysis at SEPLN, TASS@SEPLN 2018, co-located with 34nd SEPLN Conference (SEPLN 2018), Sevilla, Spain, September 18th, 2018., pages 29-35.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Gendertrolling: Misogyny adapts to new media", |
| "authors": [ |
| { |
| "first": "Karla", |
| "middle": [], |
| "last": "Mantilla", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Feminist Studies", |
| "volume": "39", |
| "issue": "2", |
| "pages": "563--570", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Karla Mantilla. 2013. Gendertrolling: Misogyny adapts to new media. Feminist Studies, 39(2):563- 570.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Overview of TASS 2018: Opinions, health and emotions", |
| "authors": [ |
| { |
| "first": "Eugenio", |
| "middle": [], |
| "last": "Mart\u00ednez-C\u00e1mara", |
| "suffix": "" |
| }, |
| { |
| "first": "Yudivi\u00e1n Almeida", |
| "middle": [], |
| "last": "Cruz", |
| "suffix": "" |
| }, |
| { |
| "first": "Manuel", |
| "middle": [ |
| "C" |
| ], |
| "last": "D\u00edaz-Galiano", |
| "suffix": "" |
| }, |
| { |
| "first": "Suilan", |
| "middle": [ |
| "Est\u00e9vez" |
| ], |
| "last": "Velarde", |
| "suffix": "" |
| }, |
| { |
| "first": "Miguel\u00e1", |
| "middle": [], |
| "last": "Garc\u00eda-Cumbreras", |
| "suffix": "" |
| }, |
| { |
| "first": "Manuel", |
| "middle": [], |
| "last": "Garc\u00eda-Vega", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoan", |
| "middle": [ |
| "Guti\u00e9rrez" |
| ], |
| "last": "V\u00e1zquez", |
| "suffix": "" |
| }, |
| { |
| "first": "Arturo", |
| "middle": [ |
| "Montejo" |
| ], |
| "last": "R\u00e1ez", |
| "suffix": "" |
| }, |
| { |
| "first": "Andr\u00e9", |
| "middle": [ |
| "Montoyo" |
| ], |
| "last": "Guijarro", |
| "suffix": "" |
| }, |
| { |
| "first": "Rafael", |
| "middle": [ |
| "Mu\u00f1oz" |
| ], |
| "last": "Guillena", |
| "suffix": "" |
| }, |
| { |
| "first": "Alejandro", |
| "middle": [ |
| "Piad" |
| ], |
| "last": "Morffis", |
| "suffix": "" |
| }, |
| { |
| "first": "Julio", |
| "middle": [], |
| "last": "Villena-Rom\u00e1n", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of TASS 2018: Workshop on Semantic Analysis at SEPLN (TASS 2018)", |
| "volume": "2172", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eugenio Mart\u00ednez-C\u00e1mara, Yudivi\u00e1n Almeida Cruz, Manuel C. D\u00edaz-Galiano, Suilan Est\u00e9vez Velarde, Miguel\u00c1. Garc\u00eda-Cumbreras, Manuel Garc\u00eda-Vega, Yoan Guti\u00e9rrez V\u00e1zquez, Arturo Montejo R\u00e1ez, Andr\u00e9 Montoyo Guijarro, Rafael Mu\u00f1oz Guillena, Alejandro Piad Morffis, and Julio Villena-Rom\u00e1n. 2018. Overview of TASS 2018: Opinions, health and emotions. In Proceedings of TASS 2018: Workshop on Semantic Analysis at SEPLN (TASS 2018), volume 2172 of CEUR Workshop Proceed- ings, Sevilla, Spain. CEUR-WS.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Learned in translation: Contextualized word vectors", |
| "authors": [ |
| { |
| "first": "Bryan", |
| "middle": [], |
| "last": "Mccann", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Bradbury", |
| "suffix": "" |
| }, |
| { |
| "first": "Caiming", |
| "middle": [], |
| "last": "Xiong", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "6294--6305", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bryan McCann, James Bradbury, Caiming Xiong, and Richard Socher. 2017. Learned in translation: Con- textualized word vectors. In Advances in Neural In- formation Processing Systems, pages 6294-6305.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Twitter as a corpus for sentiment analysis and opinion mining", |
| "authors": [ |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Pak", |
| "suffix": "" |
| }, |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Paroubek", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "LREC", |
| "volume": "10", |
| "issue": "", |
| "pages": "1320--1326", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexander Pak and Patrick Paroubek. 2010. Twitter as a corpus for sentiment analysis and opinion mining. In LREC, volume 10, pages 1320-1326.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Scikit-learn: Machine learning in Python", |
| "authors": [ |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Pedregosa", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Varoquaux", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Gramfort", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Michel", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Thirion", |
| "suffix": "" |
| }, |
| { |
| "first": "O", |
| "middle": [], |
| "last": "Grisel", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Blondel", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Prettenhofer", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Weiss", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Dubourg", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Vanderplas", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Passos", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Cournapeau", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Brucher", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Perrot", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Duchesnay", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "12", |
| "issue": "", |
| "pages": "2825--2830", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "F. Pedregosa, G. Varoquaux, A. Gramfort, V. Michel, B. Thirion, O. Grisel, M. Blondel, P. Pretten- hofer, R. Weiss, V. Dubourg, J. Vanderplas, A. Pas- sos, D. Cournapeau, M. Brucher, M. Perrot, and E. Duchesnay. 2011. Scikit-learn: Machine learning in Python. Journal of Machine Learning Research, 12:2825-2830.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Deep contextualized word representations. CoRR", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [ |
| "E" |
| ], |
| "last": "Peters", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Neumann", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Iyyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Gardner", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew E. Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word rep- resentations. CoRR, abs/1802.05365.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "The virtual community: Finding commection in a computerized world", |
| "authors": [ |
| { |
| "first": "Howard", |
| "middle": [], |
| "last": "Rheingold", |
| "suffix": "" |
| } |
| ], |
| "year": 1993, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Howard Rheingold. 1993. The virtual commu- nity: Finding commection in a computerized world. Addison-Wesley Longman Publishing Co., Inc.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "A web of hate: Tackling hateful speech in online social spaces", |
| "authors": [ |
| { |
| "first": "Mohammad", |
| "middle": [], |
| "last": "Haji", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Saleem", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Kelly", |
| "suffix": "" |
| }, |
| { |
| "first": "Susan", |
| "middle": [], |
| "last": "Dillon", |
| "suffix": "" |
| }, |
| { |
| "first": "Derek", |
| "middle": [], |
| "last": "Benesch", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ruths", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1709.10159" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Haji Mohammad Saleem, Kelly P Dillon, Susan Be- nesch, and Derek Ruths. 2017. A web of hate: Tack- ling hateful speech in online social spaces. arXiv preprint arXiv:1709.10159.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Improvements in part-ofspeech tagging with an application to german", |
| "authors": [ |
| { |
| "first": "Helmut", |
| "middle": [], |
| "last": "Schmid", |
| "suffix": "" |
| } |
| ], |
| "year": 1995, |
| "venue": "Proceedings of the ACL SIGDAT-Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "47--50", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Helmut Schmid. 1995. Improvements in part-of- speech tagging with an application to german. In In Proceedings of the ACL SIGDAT-Workshop, pages 47-50.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Social networks, gender, and friending: An analysis of myspace member profiles", |
| "authors": [ |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Thelwall", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Journal of the American Society for Information Science and Technology", |
| "volume": "59", |
| "issue": "8", |
| "pages": "1321--1330", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mike Thelwall. 2008. Social networks, gender, and friending: An analysis of myspace member profiles. Journal of the American Society for Information Sci- ence and Technology, 59(8):1321-1330.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Detecting hate speech on the world wide web", |
| "authors": [ |
| { |
| "first": "William", |
| "middle": [], |
| "last": "Warner", |
| "suffix": "" |
| }, |
| { |
| "first": "Julia", |
| "middle": [], |
| "last": "Hirschberg", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the Second Workshop on Language in Social Media", |
| "volume": "", |
| "issue": "", |
| "pages": "19--26", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "William Warner and Julia Hirschberg. 2012. Detecting hate speech on the world wide web. In Proceed- ings of the Second Workshop on Language in Social Media, pages 19-26. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Hateful symbols or hateful people? predictive features for hate speech detection on twitter", |
| "authors": [ |
| { |
| "first": "Zeerak", |
| "middle": [], |
| "last": "Waseem", |
| "suffix": "" |
| }, |
| { |
| "first": "Dirk", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the NAACL student research workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "88--93", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zeerak Waseem and Dirk Hovy. 2016. Hateful sym- bols or hateful people? predictive features for hate speech detection on twitter. In Proceedings of the NAACL student research workshop, pages 88-93.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Detecting hate speech on twitter using a convolution-gru based deep neural network", |
| "authors": [ |
| { |
| "first": "Ziqi", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Robinson", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "Tepper", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "European Semantic Web Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "745--760", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ziqi Zhang, David Robinson, and Jonathan Tepper. 2018. Detecting hate speech on twitter using a convolution-gru based deep neural network. In Eu- ropean Semantic Web Conference, pages 745-760. Springer.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "text": "The LSTM-ELMo+BoW architecture. ELMo and BoW boxes represent inputs.", |
| "uris": null, |
| "type_str": "figure" |
| }, |
| "TABREF2": { |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "text": "Our evaluation results for subtask A on the development and test sets for Spanish and English. F1 (avg) is the average on positive and negative classes.", |
| "content": "<table/>" |
| } |
| } |
| } |
| } |