| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T14:51:23.643889Z" |
| }, |
| "title": "A Semi-Supervised Approach to Detect Toxic Comments", |
| "authors": [ |
| { |
| "first": "Ghivvago", |
| "middle": [ |
| "D" |
| ], |
| "last": "Saraiva", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Federal University of Piau\u00ed", |
| "location": { |
| "country": "Brazil" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Rafael", |
| "middle": [ |
| "T" |
| ], |
| "last": "Anchi\u00eata", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Francisco", |
| "middle": [ |
| "A R" |
| ], |
| "last": "Neto", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Raimundo", |
| "middle": [ |
| "S" |
| ], |
| "last": "Moura", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Federal University of Piau\u00ed", |
| "location": { |
| "country": "Brazil" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Toxic comments contain forms of nonacceptable language targeted towards groups or individuals. These types of comments become a serious concern for government organizations, online communities, and social media platforms. Although there are some approaches to handle non-acceptable language, most of them focus on supervised learning and the English language. In this paper, we deal with toxic comment detection as a semi-supervised strategy over a heterogeneous graph. We evaluate the approach on a toxic dataset of the Portuguese language, outperforming several graph-based methods and achieving competitive results compared to transformer architectures.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Toxic comments contain forms of nonacceptable language targeted towards groups or individuals. These types of comments become a serious concern for government organizations, online communities, and social media platforms. Although there are some approaches to handle non-acceptable language, most of them focus on supervised learning and the English language. In this paper, we deal with toxic comment detection as a semi-supervised strategy over a heterogeneous graph. We evaluate the approach on a toxic dataset of the Portuguese language, outperforming several graph-based methods and achieving competitive results compared to transformer architectures.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Toxic comments, posts, and other types of content became more common in social media nowadays. They contain forms of non-acceptable language (profanity), which may be concealed or explicit, including insults and threats directed to a group or individual (Zampieri et al., 2019) . These comments spread rapidly on the internet, especially on social networks where they find acceptance, and may culminate in several threats to individuals, becoming a serious concern for government organizations, online communities, and social media platforms.", |
| "cite_spans": [ |
| { |
| "start": 254, |
| "end": 277, |
| "text": "(Zampieri et al., 2019)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The term toxic comment is commonly found in literature as harmful speech, hate speech, or offensive language. Toxic comment may be viewed as negative online behaviors, i.e., comments that are rude, disrespectful, may contain hate speech, or otherwise likely to make someone leave a discussion 1 . Schmidt and Wiegand (2017) define hate speech as any communication that disparages a person or a group based on some characteristic such as race, color, ethnicity, gender, sexual orientation, nationality, religion, or other characteristics. Also, it may occur with different linguistic styles, even in subtle forms or when humour is used (Fortuna and Nunes, 2018) . It is important to highlight that fighting these types of comments is of utmost importance since they are a crime in several countries.", |
| "cite_spans": [ |
| { |
| "start": 297, |
| "end": 323, |
| "text": "Schmidt and Wiegand (2017)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 635, |
| "end": 660, |
| "text": "(Fortuna and Nunes, 2018)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "To deal with toxic comments, most approaches adopt supervised-machine learning techniques and are mainly focused on the English language (Poletto et al., 2020) . These approaches range from surface-level features, as Bag-Of-Words (Paiva et al., 2019), linguistics features, as Part-Of-Speech information (Chen et al., 2012) , deep neural networks, as Long Short-Term Memory (LSTM) (Fortuna et al., 2019) and Convolutional Neural Networks (CNN) (Badjatiya et al., 2017) to Transformer architectures (Leite et al., 2020) . Despite interesting results achieved by Transformer architectures, there are still several rooms to be explored in this research area.", |
| "cite_spans": [ |
| { |
| "start": 137, |
| "end": 159, |
| "text": "(Poletto et al., 2020)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 304, |
| "end": 323, |
| "text": "(Chen et al., 2012)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 381, |
| "end": 403, |
| "text": "(Fortuna et al., 2019)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 444, |
| "end": 468, |
| "text": "(Badjatiya et al., 2017)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 498, |
| "end": 518, |
| "text": "(Leite et al., 2020)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we developed a semi-supervised strategy to detect toxic comments in the Brazilian Portuguese language. Semi-supervision is the problem of learning from labeled and unlabeled data (Abney, 2007; Subramanya and Talukdar, 2014) , in which given a point set X = {x 1 , ..., x l , x l+1 , ..., x n } and a label set L = {1, ..., c}, the first l points have labels {y 1 , ..., y l } \u2208 L and the remaining points are unlabeled (Zhou et al., 2004) .", |
| "cite_spans": [ |
| { |
| "start": 194, |
| "end": 207, |
| "text": "(Abney, 2007;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 208, |
| "end": 238, |
| "text": "Subramanya and Talukdar, 2014)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 434, |
| "end": 453, |
| "text": "(Zhou et al., 2004)", |
| "ref_id": "BIBREF34" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We modeled that problem as a heterogeneous network. The structure of our graph was inspired by de Sousa et al. (2020) and Anchi\u00eata et al. (2020) . These authors modeled the tasks of helpfulness prediction and paraphrase identification as a heterogeneous network, respectively. For that, they defined an undirected unweighted graph with two node types: sentence and token. However, we have created a weighted graph based on pre-trained word embeddings. The weight between sentence and token nodes is the average of the embedding values for that token. Figure 1 depicts an example of a sentence modeled as a graph. From this figure, we may see two node types: token and sentence, and an undirected and weighted edges between the sentence and tokens nodes. ", |
| "cite_spans": [ |
| { |
| "start": 95, |
| "end": 117, |
| "text": "de Sousa et al. (2020)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 122, |
| "end": 144, |
| "text": "Anchi\u00eata et al. (2020)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 551, |
| "end": 559, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "E(t) E(t) E(t) E(t) E(t) E(t)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Figure 1: Example of a graph model for the sentence \"Holy shit, I miss playing midnight club\".", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "To extract features from the graph structure, we used a regularization algorithm that propagates labels from a small set of labeled nodes to the entire graph.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We evaluated the approach using the ToLD-Br corpus (Leite et al., 2020). It has twenty-one thousand annotated tweets as either toxic or non-toxic language. Also, we compared our strategy with different graph-based methods and with transformerbased methods. Our method outperformed all graph-based approaches and achieved competitive results compared to transformer-based methods, using only 10% of labeled nodes.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The remainder of this paper is structured as follows: Section 2 briefly presents related work. In Section 3, we show the used corpora. Section 4 details our developed approach. In Section 5, we analyze the conducted experiments. Finally, Section 6 concludes the paper, presenting future directions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "As aforementioned, the main approaches to detect toxic comments are based on supervised machine learning. Here, we briefly present the main works.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Most of the works that study this task commonly point first to surface-level features, such as bag of words and lexicon-based approaches, with negative words as features (Gitari et al., 2015; Waseem and Hovy, 2016; Waseem et al., 2017; Schmidt and Wiegand, 2017) .", |
| "cite_spans": [ |
| { |
| "start": 170, |
| "end": 191, |
| "text": "(Gitari et al., 2015;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 192, |
| "end": 214, |
| "text": "Waseem and Hovy, 2016;", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 215, |
| "end": 235, |
| "text": "Waseem et al., 2017;", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 236, |
| "end": 262, |
| "text": "Schmidt and Wiegand, 2017)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "More recently, neural networks-based strategies and transformer-based architectures have been applied to hate speech detection due to the good results achieved in various tasks. Banerjee et al. (2020) evaluated pre-trained word embeddings with CNN networks to hate speech detection for the Indian language. Rizwan et al. 2020 For the Portuguese language, most of the works follow the trend of supervised approaches. de Pelle and Moreira (2017) created a dataset consisting of 1,250 offensive comments and developed a baseline method based on n-gram features to classify offensive comments in their dataset. Fortuna et al. (2019) created a hate speech dataset composed of 5,668 tweets and developed a baseline classification using pre-trained word embeddings and LSTM in their dataset. Coutinho and Malheiros (2020) trained a logistic regression using superficial features for sentiment analysis. Then, they evaluated that model on a homophobia corpus to detect homophobic posts.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Although there are some efforts to detect nonacceptable language in Portuguese, they evaluate the developed approach in their own corpus, making a fair comparison among the models difficult. Moreover, these corpora are much smaller when compared to corpora of other languages (Poletto et al., 2020) and to the ToLD-Br corpus. This fact makes the development of robust strategies to handle toxic comments difficult, as they usually require a large corpus.", |
| "cite_spans": [ |
| { |
| "start": 276, |
| "end": 298, |
| "text": "(Poletto et al., 2020)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Toxic Language Dataset for Brazilian Portuguese (ToLD-Br) (Leite et al., 2020) is a very recent dataset with Twitter posts in the Brazilian Portuguese language. It has 21K tweets manually annotated into seven categories: non-toxic,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "ToLD-BR Corpus", |
| "sec_num": "3" |
| }, |
| { |
| "text": "LGBTQ+phobia, obscene, insult, racism, misogyny, and xenophobia. The corpus is the largest dataset available for toxic data analysis in social media for Portuguese and the first dataset with demographic information about annotators.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "ToLD-BR Corpus", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Besides seven categories, the authors released a binary version of the corpus for the binary classification task, as shown in Table 1 As one can see in Table 1 , the corpus has a little more non-toxic than toxic tweets. In this paper, we adopted the binary version of the corpus, i.e., our objective is to identify if a comment is toxic or non-toxic.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 126, |
| "end": 133, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| }, |
| { |
| "start": 152, |
| "end": 159, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "ToLD-BR Corpus", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In what follows, we detail our strategy to handle toxic texts.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "ToLD-BR Corpus", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We organized the strategy into four steps, as illustrated in Figure 2 . Subsections 4.1, 4.2, 4.3, and 4.4 describe the stages. ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 61, |
| "end": 69, |
| "text": "Figure 2", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Semi-supervised approach", |
| "sec_num": "4" |
| }, |
| { |
| "text": "In the pre-processing 2 , we normalized and cleaned the tweets. In the first one, we applied the Enelvo tool (Costa Bertaglia and Volpe Nunes, 2016) to normalize abbreviated and repeated words. In the second one, we simply cleaned URLs, emojis, and tweet mentions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pre-processing", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We modeled toxic comments detection as a heterogeneous network since this network type contains abundant information with structural relations (edges) among multi-typed nodes as well as unstructured content associated with each node (Zhang et al., 2019) . Graph structures have been used for several tasks, such as: topic model, name disambiguation, scientific impact measurement, and others, obtaining good results (King et al., 2014) . We defined a undirected and weighted graph as G = (V, E, W ), where V is a set of vertices V = {v 1 , ..., v n }, E indicates a set of edges E = {e 1 , ..., e n }, and W is a weighted adjacency matrix, in which W i,j denotes the weight of an edge between nodes i and j. We defined two node types: token and sentence and two constraints not allowing link among tokens nodes or among sentences nodes.", |
| "cite_spans": [ |
| { |
| "start": 233, |
| "end": 253, |
| "text": "(Zhang et al., 2019)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 416, |
| "end": 435, |
| "text": "(King et al., 2014)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Graph-Based Method", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The strategy of weighting links between a token and a sentence node is straightforward. The weight is the average 3 of embedding vectors of the token node. To get embedding values for each token, we used 100-dimensional GloVe embeddings 4 for the Portuguese language (Hartmann et al., 2017) . Figure 3 shows the scheme of the network designed for this task.", |
| "cite_spans": [ |
| { |
| "start": 267, |
| "end": 290, |
| "text": "(Hartmann et al., 2017)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 293, |
| "end": 301, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Graph-Based Method", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Token Avg.Emb (tok) Figure 3 : The network scheme for weighted edges.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 20, |
| "end": 28, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Sentence", |
| "sec_num": null |
| }, |
| { |
| "text": "One can see that the edges are undirected and weighted, and a sentence node may share several token nodes whenever the token is in the sentence, i.e., the edges between token nodes and sentence nodes are based on word occurrence in sentence.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence", |
| "sec_num": null |
| }, |
| { |
| "text": "To extract the features regarding the network object classes, we applied a regularization method to the graph. Regularization is a kind of semi-supervised (or transductive) classification method that aims to find a set of labels, minimizing a cost function and satisfying two conditions: (i) the method needs to be consistent with the set of labels manually annotated and (ii) the method needs to be consistent with the network topology, considering that nearest neighbors tend to have the same labels (Ji et al., 2010) .", |
| "cite_spans": [ |
| { |
| "start": 502, |
| "end": 519, |
| "text": "(Ji et al., 2010)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Regularization", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "We used the learning with Local and Global Consistence (LGC) (Zhou et al., 2004) as a regularization method. The algorithm designs a classi-fying function that is sufficiently smooth concerning the intrinsic structure collectively revealed by known labeled and unlabeled points. Thus, the LGC lets every point iteratively spread its label information to its neighbors until a global stable state is achieved (Gui et al., 2014) . Also, it allows the class information of the labeled objects to be changed during the classification as objects may be erroneously labeled and, consequently, decrease the performance of the classification. More than that, the algorithm diminished the influence of objects with a high degree (many neighboring objects), therefore, these objects do not have excessive influence in the classification.", |
| "cite_spans": [ |
| { |
| "start": 61, |
| "end": 80, |
| "text": "(Zhou et al., 2004)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 408, |
| "end": 426, |
| "text": "(Gui et al., 2014)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Regularization", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "To execute the algorithm, a set of nodes need to be pre-labeled. The regularizer randomly prelabeled, i.e., supposing that the percentage of prelabeled nodes is equals 5%, it means that 0.25% of each class is randomly pre-labeled. As a result, the regularizer produces values related to coordinates for each object in the network, as shown in Table 2 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 343, |
| "end": 350, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Regularization", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Value 1 Value 2 Label 100 0.004567 0.001456 1 255 0.002789 0.008763 0 878 0.001998 0.005342 0 233 0.008764 0.003215 1 From Table 2 , Id is the object identifier, Values refer to coordinates of each object in the network, and Label 1 shows toxic, while Label 0 is a nontoxic tweet.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 123, |
| "end": 130, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Id", |
| "sec_num": null |
| }, |
| { |
| "text": "With the regularization values, we fed several machine learning algorithms to identify and predict toxic comments. We experimented Multi Layer Perceptron, Na\u00efve Bayes, Decision Tree, Support Vector Machine, and Gradient Boosting from the Scikit-Learn library (Pedregosa et al., 2011) .", |
| "cite_spans": [ |
| { |
| "start": 259, |
| "end": 283, |
| "text": "(Pedregosa et al., 2011)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Classification", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "In the following section, we detailed our carried out experiments, then, the achieved results are presented.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Classification", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "In order to produce coordinate values for each object from the regularizer, we ranged the number of pre-labeled nodes from 5% to 30%. Then, we applied the machine learning algorithms to train and classifier toxic comments.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments and Results", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We achieved the best result with the Gradient Boosting classifier 5 using only 10% of the prelabeled nodes i.e., the classification does not improve after this percentage. Table 3 shows the achieved results. It is important to say that only the training set is pre-labeled. Besides our approach, we evaluated other graph models of different structures. First, we used the network graph developed by Anchi\u00eata et al. (2020) . That graph does not use weight between the nodes. Second, we used the Term Frequency-Inverse Document Frequency (TF-IDF) as weight instead of the average of embeddings. Third, we used bigrams and trigrams as nodes rather than token nodes. Finally, we used the Pointwise Mutual Information (PMI) measure (Church and Hanks, 1990) as the weight between the bi and trigrams nodes. For these approaches, we adopted the same regularization algorithm, ranging the pre-labeled nodes from 5% to 30%. In Table 4 , we present the best-achieved results. From this table, our graph modeling and the gradient boosting classifier achieved better results than these other graphs, as well as classifier variations. This, we think, is because of the embedding value among the graph nodes since it is able to capture morphological, syntactic, and semantic knowledge of a word. As we used the average word embedding value, it includes information from all of the individual vector values, working as an overall summary of all vector values.", |
| "cite_spans": [ |
| { |
| "start": 399, |
| "end": 421, |
| "text": "Anchi\u00eata et al. (2020)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 727, |
| "end": 751, |
| "text": "(Church and Hanks, 1990)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 172, |
| "end": 179, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| }, |
| { |
| "start": 918, |
| "end": 925, |
| "text": "Table 4", |
| "ref_id": "TABREF6" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments and Results", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We further compared our strategy with other graph-based approaches: Text Graph Convolutional Network (TextGCN) (Yao et al., 2019) and Heterogeneous Graph Attention Network (HGAT) (Yang et al., 2021) . The former models the whole text corpus as a document-word graph with word co-occurrence relations and applies GCN for classification. The latter models the texts using a heterogeneous information network framework and adopts heterogeneous graph attention to embed that framework for text classification based on a dual-level attention mechanism. Finally, we compared our approach with a transformer-based method as it has achieved remarkable results in several areas of Natural Language Processing (NLP). We compared our strategy with BR-BERT (Leite et al., 2020), which is a monolingual BERT, and M- BERT (Leite et al., 2020) , which is a multilingual BERT. Table 5 shows the comparison between these methods. As we can see from Table 5 , our approach outperformed the graph-based methods and reached a competitive result compared to transformer models. Although our strategy did not outperform transformers, we believe the results are very promising, since it requires much less computational power than transformers. Moreover, our method requires less annotated data (only 10%) than transformers to achieve interesting results.", |
| "cite_spans": [ |
| { |
| "start": 111, |
| "end": 129, |
| "text": "(Yao et al., 2019)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 179, |
| "end": 198, |
| "text": "(Yang et al., 2021)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 803, |
| "end": 828, |
| "text": "BERT (Leite et al., 2020)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 861, |
| "end": 868, |
| "text": "Table 5", |
| "ref_id": "TABREF8" |
| }, |
| { |
| "start": 932, |
| "end": 939, |
| "text": "Table 5", |
| "ref_id": "TABREF8" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments and Results", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Our approach is available at https://github.c om/rafaelanchieta/toxic.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments and Results", |
| "sec_num": "5" |
| }, |
| { |
| "text": "In this paper, we explored a semi-supervised strategy to deal with toxic comments from Twitter. We modeled the texts as a heterogeneous network graph with two node types and weighted edges among nodes. Then, we applied a regularization algorithm to extract features related to the toxic texts. Finally, we used these features to feed a classifier to identify and predict toxic comments. Our approach outperformed several graph-based methods and achieved a competitive result compared to the BERT model, using only 10% of the corpus. We hope that this graph model brings insights to hate speech detection research, helping to improve the results. Furthermore, our strategy may be employed in other languages, as it only requires an embedding representation. As future work, we intend to explore the graph structure, analyzing some network measures, such as degree, centrality, community identification, and others. Also, we aim to examine contextual embeddings rather than traditional embeddings.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "https://www.kaggle.com/c/jigsaw-toxi c-comment-classification-challenge/overv iew", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "The obtained results without pre-processing were worse than with pre-processing.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We also tested the sum, maximum, and minimum values.4 We also experimented with other pre-trained models with dimensions of 50, 100, and 300.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We used as parameters n_estimators = 5 and max_depth = 5", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "The authors are grateful to CAPES for supporting this work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Semisupervised Learning for Computational Linguistics", |
| "authors": [ |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Abney", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Steven Abney. 2007. Semisupervised Learning for Computational Linguistics, 1st edition. Chapman & Hall/CRC.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Modeling the paraphrase detection task over a heterogeneous graph network with data augmentation", |
| "authors": [ |
| { |
| "first": "Rog\u00e9rio F De", |
| "middle": [], |
| "last": "Rafael T Anchi\u00eata", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Sousa", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Thiago", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Pardo", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Information", |
| "volume": "11", |
| "issue": "9", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rafael T Anchi\u00eata, Rog\u00e9rio F de Sousa, and Thiago AS Pardo. 2020. Modeling the paraphrase detection task over a heterogeneous graph network with data augmentation. Information, 11(9):422.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Comparing pre-trained language models for spanish hate speech detection. Expert Systems with Applications", |
| "authors": [ |
| { |
| "first": "Flor", |
| "middle": [ |
| "Miriam" |
| ], |
| "last": "Plaza-Del Arco", |
| "suffix": "" |
| }, |
| { |
| "first": "Dolores", |
| "middle": [], |
| "last": "Molina-Gonz\u00e1lez", |
| "suffix": "" |
| }, |
| { |
| "first": "Alfonso", |
| "middle": [], |
| "last": "Ure\u00f1a-L\u00f3pez", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Teresa Mart\u00edn-Valdivia", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "166", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1016/j.eswa.2020.114120" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Flor Miriam Plaza-del Arco, M Dolores Molina- Gonz\u00e1lez, L Alfonso Ure\u00f1a-L\u00f3pez, and M Teresa Mart\u00edn-Valdivia. 2021. Comparing pre-trained lan- guage models for spanish hate speech detection. Ex- pert Systems with Applications, 166:114120.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Deep learning for hate speech detection in tweets", |
| "authors": [ |
| { |
| "first": "Pinkesh", |
| "middle": [], |
| "last": "Badjatiya", |
| "suffix": "" |
| }, |
| { |
| "first": "Shashank", |
| "middle": [], |
| "last": "Gupta", |
| "suffix": "" |
| }, |
| { |
| "first": "Manish", |
| "middle": [], |
| "last": "Gupta", |
| "suffix": "" |
| }, |
| { |
| "first": "Vasudeva", |
| "middle": [], |
| "last": "Varma", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 26th international conference on World Wide Web companion", |
| "volume": "", |
| "issue": "", |
| "pages": "759--760", |
| "other_ids": { |
| "DOI": [ |
| "https://dl.acm.org/doi/pdf/10.1145/3041021.3054223?casa_token=VM0C7Lhq47EAAAAA:IiWW-99FmhNAnAs2wS8_YceU6I1-qOQHg-GNvixFL9wTQ6ugWPcGdhSaCi47BzlQOP8pVOWU9Z0vLg" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pinkesh Badjatiya, Shashank Gupta, Manish Gupta, and Vasudeva Varma. 2017. Deep learning for hate speech detection in tweets. In Proceedings of the 26th international conference on World Wide Web companion, pages 759-760, Perth, Australia. In- ternational World Wide Web Conferences Steering Committee.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Comparison of Pretrained Embeddings to Identify Hate Speech in Indian Code-Mixed Text", |
| "authors": [ |
| { |
| "first": "Shubhanker", |
| "middle": [], |
| "last": "Banerjee", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [ |
| "P" |
| ], |
| "last": "Bharathi Raja Chakravarthi", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mccrae", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2nd International Conference on Advances in Computing, Communication Control and Networking", |
| "volume": "", |
| "issue": "", |
| "pages": "21--25", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/ICACCCN51052.2020.9362731" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shubhanker Banerjee, Bharathi Raja Chakravarthi, and John P. McCrae. 2020. Comparison of Pretrained Embeddings to Identify Hate Speech in Indian Code- Mixed Text. In Proceedings of the 2nd International Conference on Advances in Computing, Communica- tion Control and Networking, pages 21-25, Greater Noida, India. IEEE.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Detecting offensive language in social media to protect adolescent online safety", |
| "authors": [ |
| { |
| "first": "Ying", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Yilu", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Sencun", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Heng", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "2012 International Conference on Privacy, Security, Risk and Trust and 2012 International Confernece on Social Computing", |
| "volume": "", |
| "issue": "", |
| "pages": "71--80", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ying Chen, Yilu Zhou, Sencun Zhu, and Heng Xu. 2012. Detecting offensive language in social media to protect adolescent online safety. In 2012 Inter- national Conference on Privacy, Security, Risk and Trust and 2012 International Confernece on Social Computing, pages 71-80, Amsterdam, Netherlands. IEEE.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Word association norms, mutual information, and lexicography", |
| "authors": [ |
| { |
| "first": "Kenneth", |
| "middle": [ |
| "Ward" |
| ], |
| "last": "Church", |
| "suffix": "" |
| }, |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Hanks", |
| "suffix": "" |
| } |
| ], |
| "year": 1990, |
| "venue": "Computational Linguistics", |
| "volume": "16", |
| "issue": "1", |
| "pages": "22--29", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kenneth Ward Church and Patrick Hanks. 1990. Word association norms, mutual information, and lexicog- raphy. Computational Linguistics, 16(1):22-29.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Cross-lingual Language Model Pretraining", |
| "authors": [ |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Lample", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexis CONNEAU and Guillaume Lample. 2019. Cross-lingual Language Model Pretraining. In Ad- vances in Neural Information Processing Systems, page 11, Vancouver, Canada. Curran Associates, Inc.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Exploring word embeddings for unsupervised textual user-generated content normalization", |
| "authors": [], |
| "year": 2016, |
| "venue": "Proceedings of the 2nd Workshop on Noisy User-generated Text (WNUT)", |
| "volume": "", |
| "issue": "", |
| "pages": "112--120", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thales Felipe Costa Bertaglia and Maria das Gra\u00e7as Volpe Nunes. 2016. Exploring word embeddings for unsupervised textual user-generated content nor- malization. In Proceedings of the 2nd Workshop on Noisy User-generated Text (WNUT), pages 112-120, Osaka, Japan. The COLING 2016 Organizing Com- mittee.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Detec\u00e7\u00e3o de Mensagens Homof\u00f3bicas em Portugu\u00eas no Twitter usando An\u00e1lise de Sentimentos", |
| "authors": [ |
| { |
| "first": "Matheus", |
| "middle": [], |
| "last": "Vinicius", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuri", |
| "middle": [], |
| "last": "Coutinho", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Malheiros", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Anais do IX Brazilian Workshop on Social Network Analysis and Mining", |
| "volume": "", |
| "issue": "", |
| "pages": "1--12", |
| "other_ids": { |
| "DOI": [ |
| "10.5753/brasnam.2020.11158" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vinicius Matheus Coutinho and Yuri Malheiros. 2020. Detec\u00e7\u00e3o de Mensagens Homof\u00f3bicas em Portugu\u00eas no Twitter usando An\u00e1lise de Sentimentos. In Anais do IX Brazilian Workshop on Social Network Anal- ysis and Mining, pages 1-12, Porto Alegre, RS, Brasil. SBC.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1423" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "A Deep Learning Framework for Automatic Detection of Hate Speech Embedded in Arabic Tweets", |
| "authors": [ |
| { |
| "first": "Rehab", |
| "middle": [], |
| "last": "Duwairi", |
| "suffix": "" |
| }, |
| { |
| "first": "Amena", |
| "middle": [], |
| "last": "Hayajneh", |
| "suffix": "" |
| }, |
| { |
| "first": "Muhannad", |
| "middle": [], |
| "last": "Quwaider", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Arabian Journal for Science and Engineering", |
| "volume": "46", |
| "issue": "4", |
| "pages": "4001--4014", |
| "other_ids": { |
| "DOI": [ |
| "10.1007/s13369-021-05383-3" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rehab Duwairi, Amena Hayajneh, and Muhannad Quwaider. 2021. A Deep Learning Framework for Automatic Detection of Hate Speech Embedded in Arabic Tweets. Arabian Journal for Science and En- gineering, 46(4):4001-4014.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "A survey on automatic detection of hate speech in text", |
| "authors": [ |
| { |
| "first": "Paula", |
| "middle": [], |
| "last": "Fortuna", |
| "suffix": "" |
| }, |
| { |
| "first": "S\u00e9rgio", |
| "middle": [], |
| "last": "Nunes", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "ACM Computing Surveys (CSUR)", |
| "volume": "51", |
| "issue": "4", |
| "pages": "1--30", |
| "other_ids": { |
| "DOI": [ |
| "https://dl.acm.org/doi/pdf/10.1145/3232676?casa_token=AYxpp24C61kAAAAA:GYrPwlXKnZBz_1sCCCTKOF5XPxD8TZ5v-3i4kaDgyI0U-GMFUxdSHoKw6p3Gf1oLfGjBP2g4TvAN0A" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Paula Fortuna and S\u00e9rgio Nunes. 2018. A survey on au- tomatic detection of hate speech in text. ACM Com- puting Surveys (CSUR), 51(4):1-30.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "A hierarchically-labeled Portuguese hate speech dataset", |
| "authors": [ |
| { |
| "first": "Paula", |
| "middle": [], |
| "last": "Fortuna", |
| "suffix": "" |
| }, |
| { |
| "first": "Jo\u00e3o", |
| "middle": [], |
| "last": "Rocha Da", |
| "suffix": "" |
| }, |
| { |
| "first": "Juan", |
| "middle": [], |
| "last": "Silva", |
| "suffix": "" |
| }, |
| { |
| "first": "Leo", |
| "middle": [], |
| "last": "Soler-Company", |
| "suffix": "" |
| }, |
| { |
| "first": "S\u00e9rgio", |
| "middle": [], |
| "last": "Wanner", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Nunes", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Third Workshop on Abusive Language Online", |
| "volume": "", |
| "issue": "", |
| "pages": "94--104", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W19-3510" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Paula Fortuna, Jo\u00e3o Rocha da Silva, Juan Soler- Company, Leo Wanner, and S\u00e9rgio Nunes. 2019. A hierarchically-labeled Portuguese hate speech dataset. In Proceedings of the Third Workshop on Abusive Language Online, pages 94-104, Florence, Italy. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "A lexicon-based approach for hate speech detection", |
| "authors": [ |
| { |
| "first": "Njagi", |
| "middle": [], |
| "last": "Dennis Gitari", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhang", |
| "middle": [], |
| "last": "Zuping", |
| "suffix": "" |
| }, |
| { |
| "first": "Hanyurwimfura", |
| "middle": [], |
| "last": "Damien", |
| "suffix": "" |
| }, |
| { |
| "first": "Jun", |
| "middle": [], |
| "last": "Long", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "International Journal of Multimedia and Ubiquitous Engineering", |
| "volume": "10", |
| "issue": "4", |
| "pages": "215--230", |
| "other_ids": { |
| "DOI": [ |
| "10.14257/ijmue.2015.10.4.21" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Njagi Dennis Gitari, Zhang Zuping, Hanyurwimfura Damien, and Jun Long. 2015. A lexicon-based approach for hate speech detection. International Journal of Multimedia and Ubiquitous Engineering, 10(4):215-230.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Semi-supervised learning with local and global consistency", |
| "authors": [ |
| { |
| "first": "Jie", |
| "middle": [], |
| "last": "Gui", |
| "suffix": "" |
| }, |
| { |
| "first": "Rongxiang", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhongqiu", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Jia", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "International Journal of Computer Mathematics", |
| "volume": "91", |
| "issue": "11", |
| "pages": "2389--2402", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jie Gui, Rongxiang Hu, Zhongqiu Zhao, and Wei Jia. 2014. Semi-supervised learning with local and global consistency. International Journal of Com- puter Mathematics, 91(11):2389-2402.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Portuguese word embeddings: Evaluating on word analogies and natural language tasks", |
| "authors": [ |
| { |
| "first": "Nathan", |
| "middle": [], |
| "last": "Hartmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Erick", |
| "middle": [], |
| "last": "Fonseca", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Shulby", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcos", |
| "middle": [], |
| "last": "Treviso", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00e9ssica", |
| "middle": [], |
| "last": "Silva", |
| "suffix": "" |
| }, |
| { |
| "first": "Sandra", |
| "middle": [], |
| "last": "Alu\u00edsio", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 11th Brazilian Symposium in Information and Human Language Technology", |
| "volume": "", |
| "issue": "", |
| "pages": "122--131", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nathan Hartmann, Erick Fonseca, Christopher Shulby, Marcos Treviso, J\u00e9ssica Silva, and Sandra Alu\u00edsio. 2017. Portuguese word embeddings: Evaluating on word analogies and natural language tasks. In Proceedings of the 11th Brazilian Symposium in In- formation and Human Language Technology, pages 122-131, Uberl\u00e2ndia, Brazil. Sociedade Brasileira de Computa\u00e7\u00e3o.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Graph regularized transductive classification on heterogeneous information networks", |
| "authors": [ |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Ji", |
| "suffix": "" |
| }, |
| { |
| "first": "Yizhou", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Marina", |
| "middle": [], |
| "last": "Danilevsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiawei", |
| "middle": [], |
| "last": "Han", |
| "suffix": "" |
| }, |
| { |
| "first": "Jing", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the Joint European Conference on Machine Learning and Knowledge Discovery in Databases", |
| "volume": "", |
| "issue": "", |
| "pages": "570--586", |
| "other_ids": { |
| "DOI": [ |
| "10.1007/978-3-642-15880-3_42" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ming Ji, Yizhou Sun, Marina Danilevsky, Jiawei Han, and Jing Gao. 2010. Graph regularized transduc- tive classification on heterogeneous information net- works. In In Proceedings of the Joint European Conference on Machine Learning and Knowledge Discovery in Databases, pages 570-586, Barcelona, Spain. Springer.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Heterogeneous networks and their applications: Scientometrics, name disambiguation, and topic modeling", |
| "authors": [ |
| { |
| "first": "Ben", |
| "middle": [], |
| "last": "King", |
| "suffix": "" |
| }, |
| { |
| "first": "Rahul", |
| "middle": [], |
| "last": "Jha", |
| "suffix": "" |
| }, |
| { |
| "first": "Dragomir", |
| "middle": [ |
| "R" |
| ], |
| "last": "Radev", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "1--14", |
| "other_ids": { |
| "DOI": [ |
| "10.1162/tacl_a_00161" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ben King, Rahul Jha, and Dragomir R. Radev. 2014. Heterogeneous networks and their applications: Sci- entometrics, name disambiguation, and topic model- ing. Transactions of the Association for Computa- tional Linguistics, 2:1-14.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Toxic language detection in social media for Brazilian Portuguese: New dataset and multilingual analysis", |
| "authors": [ |
| { |
| "first": "Diego", |
| "middle": [], |
| "last": "Jo\u00e3o Augusto Leite", |
| "suffix": "" |
| }, |
| { |
| "first": "Kalina", |
| "middle": [], |
| "last": "Silva", |
| "suffix": "" |
| }, |
| { |
| "first": "Carolina", |
| "middle": [], |
| "last": "Bontcheva", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Scarton", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 1st Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the 10th International Joint Conference on Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "914--924", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jo\u00e3o Augusto Leite, Diego Silva, Kalina Bontcheva, and Carolina Scarton. 2020. Toxic language detec- tion in social media for Brazilian Portuguese: New dataset and multilingual analysis. In Proceedings of the 1st Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the 10th International Joint Conference on Natural Lan- guage Processing, pages 914-924, Suzhou, China. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Detec\u00e7\u00e3o autom\u00e1tica de discurso de \u00f3dio em coment\u00e1rios online", |
| "authors": [ |
| { |
| "first": "Vanecy", |
| "middle": [], |
| "last": "Peter Dias Paiva", |
| "suffix": "" |
| }, |
| { |
| "first": "Silva", |
| "middle": [], |
| "last": "Matias Da", |
| "suffix": "" |
| }, |
| { |
| "first": "Raimundo Santos", |
| "middle": [], |
| "last": "Moura", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Anais da VII Escola Regional de Computa\u00e7\u00e3o Aplicada \u00e0 Sa\u00fade", |
| "volume": "", |
| "issue": "", |
| "pages": "157--162", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Dias Paiva, Vanecy Matias da Silva, and Raimundo Santos Moura. 2019. Detec\u00e7\u00e3o au- tom\u00e1tica de discurso de\u00f3dio em coment\u00e1rios online. In Anais da VII Escola Regional de Computa\u00e7\u00e3o Aplicada\u00e0 Sa\u00fade, pages 157-162, Teresina, Piau\u00ed, Brazil. Sociedade Brasileira de Computa\u00e7\u00e3o.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Scikit-learn: Machine learning in Python", |
| "authors": [ |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Pedregosa", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Varoquaux", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Gramfort", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Michel", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Thirion", |
| "suffix": "" |
| }, |
| { |
| "first": "O", |
| "middle": [], |
| "last": "Grisel", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Blondel", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Prettenhofer", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Weiss", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Dubourg", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Vanderplas", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Passos", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Cournapeau", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Brucher", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Perrot", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Duchesnay", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "12", |
| "issue": "", |
| "pages": "2825--2830", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "F. Pedregosa, G. Varoquaux, A. Gramfort, V. Michel, B. Thirion, O. Grisel, M. Blondel, P. Prettenhofer, R. Weiss, V. Dubourg, J. Vanderplas, A. Passos, D. Cournapeau, M. Brucher, M. Perrot, and E. Duch- esnay. 2011. Scikit-learn: Machine learning in Python. Journal of Machine Learning Research, 12:2825-2830.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Offensive Comments in the Brazilian Web: a dataset and baseline results", |
| "authors": [ |
| { |
| "first": "Rogers", |
| "middle": [], |
| "last": "Prates De Pelle", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Viviane", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Moreira", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Anais do VI Brazilian Workshop on Social Network Analysis and Mining", |
| "volume": "", |
| "issue": "", |
| "pages": "510--519", |
| "other_ids": { |
| "DOI": [ |
| "10.5753/brasnam.2017.3260" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rogers Prates de Pelle and Viviane P Moreira. 2017. Offensive Comments in the Brazilian Web: a dataset and baseline results. In Anais do VI Brazilian Work- shop on Social Network Analysis and Mining, pages 510-519, S\u00e3o Paulo, Brazil. Sociedade Brasileira de Computa\u00e7\u00e3o.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Resources and benchmark corpora for hate speech detection: a systematic review", |
| "authors": [ |
| { |
| "first": "Fabio", |
| "middle": [], |
| "last": "Poletto", |
| "suffix": "" |
| }, |
| { |
| "first": "Valerio", |
| "middle": [], |
| "last": "Basile", |
| "suffix": "" |
| }, |
| { |
| "first": "Manuela", |
| "middle": [], |
| "last": "Sanguinetti", |
| "suffix": "" |
| }, |
| { |
| "first": "Cristina", |
| "middle": [], |
| "last": "Bosco", |
| "suffix": "" |
| }, |
| { |
| "first": "Viviana", |
| "middle": [], |
| "last": "Patti", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Language Resources and Evaluation", |
| "volume": "55", |
| "issue": "", |
| "pages": "477--523", |
| "other_ids": { |
| "DOI": [ |
| "10.1007/s10579-020-09502-8" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fabio Poletto, Valerio Basile, Manuela Sanguinetti, Cristina Bosco, and Viviana Patti. 2020. Resources and benchmark corpora for hate speech detection: a systematic review. Language Resources and Evalu- ation, 55:477-523.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Hate-speech and offensive language detection in Roman Urdu", |
| "authors": [ |
| { |
| "first": "Hammad", |
| "middle": [], |
| "last": "Rizwan", |
| "suffix": "" |
| }, |
| { |
| "first": "Muhammad", |
| "middle": [ |
| "Haroon" |
| ], |
| "last": "Shakeel", |
| "suffix": "" |
| }, |
| { |
| "first": "Asim", |
| "middle": [], |
| "last": "Karim", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2512--2522", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.emnlp-main.197" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hammad Rizwan, Muhammad Haroon Shakeel, and Asim Karim. 2020. Hate-speech and offensive lan- guage detection in Roman Urdu. In Proceedings of the 2020 Conference on Empirical Methods in Natu- ral Language Processing, pages 2512-2522, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "A survey on hate speech detection using natural language processing", |
| "authors": [ |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Schmidt", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Wiegand", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the Fifth International Workshop on Natural Language Processing for Social Media", |
| "volume": "", |
| "issue": "", |
| "pages": "1--10", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W17-1101" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anna Schmidt and Michael Wiegand. 2017. A survey on hate speech detection using natural language pro- cessing. In Proceedings of the Fifth International Workshop on Natural Language Processing for So- cial Media, pages 1-10, Valencia, Spain. Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "A graph-based method for predicting the helpfulness of product opinions", |
| "authors": [ |
| { |
| "first": "Rog\u00e9rio", |
| "middle": [], |
| "last": "Figueredo De", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Sousa", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "iSys-Brazilian Journal of Information Systems", |
| "volume": "13", |
| "issue": "4", |
| "pages": "6--21", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rog\u00e9rio Figueredo de Sousa, Rafael Torres Anchi\u00eata, and Maria das Gra\u00e7as Volpe Nunes. 2020. A graph- based method for predicting the helpfulness of prod- uct opinions. iSys-Brazilian Journal of Information Systems, 13(4):06-21.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Graph-based semi-supervised learning", |
| "authors": [ |
| { |
| "first": "Amarnag", |
| "middle": [], |
| "last": "Subramanya", |
| "suffix": "" |
| }, |
| { |
| "first": "Partha", |
| "middle": [ |
| "Pratim" |
| ], |
| "last": "Talukdar", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Synthesis Lectures on Artificial Intelligence and Machine Learning", |
| "volume": "8", |
| "issue": "", |
| "pages": "1--125", |
| "other_ids": { |
| "DOI": [ |
| "10.2200/S00590ED1V01Y201408AIM029" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Amarnag Subramanya and Partha Pratim Talukdar. 2014. Graph-based semi-supervised learning. Syn- thesis Lectures on Artificial Intelligence and Ma- chine Learning, 8(4):1-125.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Understanding abuse: A typology of abusive language detection subtasks", |
| "authors": [ |
| { |
| "first": "Zeerak", |
| "middle": [], |
| "last": "Waseem", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Davidson", |
| "suffix": "" |
| }, |
| { |
| "first": "Dana", |
| "middle": [], |
| "last": "Warmsley", |
| "suffix": "" |
| }, |
| { |
| "first": "Ingmar", |
| "middle": [], |
| "last": "Weber", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the First Workshop on Abusive Language Online", |
| "volume": "", |
| "issue": "", |
| "pages": "78--84", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W17-3012" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zeerak Waseem, Thomas Davidson, Dana Warmsley, and Ingmar Weber. 2017. Understanding abuse: A typology of abusive language detection subtasks. In Proceedings of the First Workshop on Abusive Lan- guage Online, pages 78-84, Vancouver, BC, Canada. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Hateful symbols or hateful people? predictive features for hate speech detection on Twitter", |
| "authors": [ |
| { |
| "first": "Zeerak", |
| "middle": [], |
| "last": "Waseem", |
| "suffix": "" |
| }, |
| { |
| "first": "Dirk", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the NAACL Student Research Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "88--93", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N16-2013" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zeerak Waseem and Dirk Hovy. 2016. Hateful sym- bols or hateful people? predictive features for hate speech detection on Twitter. In Proceedings of the NAACL Student Research Workshop, pages 88-93, San Diego, California. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Hgat: Heterogeneous graph attention networks for semi-supervised short text classification", |
| "authors": [ |
| { |
| "first": "Tianchi", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Linmei", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "Chuan", |
| "middle": [], |
| "last": "Shi", |
| "suffix": "" |
| }, |
| { |
| "first": "Houye", |
| "middle": [], |
| "last": "Ji", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaoli", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Liqiang", |
| "middle": [], |
| "last": "Nie", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "ACM Transactions on Information Systems", |
| "volume": "39", |
| "issue": "3", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/3450352" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tianchi Yang, Linmei Hu, Chuan Shi, Houye Ji, Xiaoli Li, and Liqiang Nie. 2021. Hgat: Heterogeneous graph attention networks for semi-supervised short text classification. ACM Transactions on Informa- tion Systems, 39(3).", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Graph convolutional networks for text classification", |
| "authors": [ |
| { |
| "first": "Liang", |
| "middle": [], |
| "last": "Yao", |
| "suffix": "" |
| }, |
| { |
| "first": "Chengsheng", |
| "middle": [], |
| "last": "Mao", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuan", |
| "middle": [], |
| "last": "Luo", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Thirty-Third AAAI Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "7370--7377", |
| "other_ids": { |
| "DOI": [ |
| "10.1609/aaai.v33i01.33017370" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Liang Yao, Chengsheng Mao, and Yuan Luo. 2019. Graph convolutional networks for text classification. In Proceedings of the Thirty-Third AAAI Conference on Artificial Intelligence, pages 7370-7377, Hon- olulu, HI, USA. AAAI Press.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "SemEval-2019 task 6: Identifying and categorizing offensive language in social media (OffensEval)", |
| "authors": [ |
| { |
| "first": "Marcos", |
| "middle": [], |
| "last": "Zampieri", |
| "suffix": "" |
| }, |
| { |
| "first": "Shervin", |
| "middle": [], |
| "last": "Malmasi", |
| "suffix": "" |
| }, |
| { |
| "first": "Preslav", |
| "middle": [], |
| "last": "Nakov", |
| "suffix": "" |
| }, |
| { |
| "first": "Sara", |
| "middle": [], |
| "last": "Rosenthal", |
| "suffix": "" |
| }, |
| { |
| "first": "Noura", |
| "middle": [], |
| "last": "Farra", |
| "suffix": "" |
| }, |
| { |
| "first": "Ritesh", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 13th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "75--86", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/S19-2010" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marcos Zampieri, Shervin Malmasi, Preslav Nakov, Sara Rosenthal, Noura Farra, and Ritesh Kumar. 2019. SemEval-2019 task 6: Identifying and catego- rizing offensive language in social media (OffensE- val). In Proceedings of the 13th International Work- shop on Semantic Evaluation, pages 75-86, Min- neapolis, Minnesota, USA. Association for Compu- tational Linguistics.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Heterogeneous graph neural network", |
| "authors": [ |
| { |
| "first": "Chuxu", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Dongjin", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| }, |
| { |
| "first": "Chao", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Ananthram", |
| "middle": [], |
| "last": "Swami", |
| "suffix": "" |
| }, |
| { |
| "first": "Nitesh", |
| "middle": [ |
| "V" |
| ], |
| "last": "Chawla", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 25th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining", |
| "volume": "", |
| "issue": "", |
| "pages": "793--803", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/3292500.3330961" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chuxu Zhang, Dongjin Song, Chao Huang, Ananthram Swami, and Nitesh V. Chawla. 2019. Heterogeneous graph neural network. In Proceedings of the 25th ACM SIGKDD International Conference on Knowl- edge Discovery and Data Mining, page 793-803, New York, NY, USA. Association for Computing Machinery.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Learning with local and global consistency", |
| "authors": [ |
| { |
| "first": "Dengyong", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Olivier", |
| "middle": [], |
| "last": "Bousquet", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [ |
| "Navin" |
| ], |
| "last": "Lal", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| }, |
| { |
| "first": "Bernhard", |
| "middle": [], |
| "last": "Sch\u00f6lkopf", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "321--328", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston, and Bernhard Sch\u00f6lkopf. 2004. Learning with local and global consistency. In Ad- vances in neural information processing systems, pages 321-328, MA, USA.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF1": { |
| "uris": null, |
| "type_str": "figure", |
| "text": "explored transfer-learning of embeddings models to Roman Urdu and developed a CNN-gram network to hate speech classification for that language. Duwairi et al. (2021) investigated the ability of CNN, CNN-LSTM, and BiLSTM-CNN to classify hate speech in Arabic. Plaza-del Arco et al. (2021) compared two pre-trained language models, such as BERT (Devlin et al., 2019) and XLM (CONNEAU and Lample, 2019) trained to detect hate speech in the Spanish language.", |
| "num": null |
| }, |
| "FIGREF2": { |
| "uris": null, |
| "type_str": "figure", |
| "text": "Process to deal with toxic comments.", |
| "num": null |
| }, |
| "TABREF0": { |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "content": "<table><tr><td>Label</td><td colspan=\"4\">Train. Valid. Test Prop.</td></tr><tr><td>Toxic</td><td>7,375</td><td>908</td><td>972</td><td>44%</td></tr><tr><td colspan=\"5\">Non-toxic 9,425 1,192 1,128 56%</td></tr></table>", |
| "text": "." |
| }, |
| "TABREF1": { |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "content": "<table/>", |
| "text": "Binary version of the ToLD-Br corpus." |
| }, |
| "TABREF2": { |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "content": "<table/>", |
| "text": "Example of regularizer output." |
| }, |
| "TABREF4": { |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "content": "<table/>", |
| "text": "Achieved results with the gradient boosting classifier." |
| }, |
| "TABREF6": { |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "content": "<table/>", |
| "text": "Comparison among graph-based approaches." |
| }, |
| "TABREF8": { |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "content": "<table/>", |
| "text": "Comparison between graph-based and transformer-based methods with our strategy." |
| } |
| } |
| } |
| } |