| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T12:54:12.029079Z" |
| }, |
| "title": "Clickbait Detection with Style-aware Title Modeling and Co-attention", |
| "authors": [ |
| { |
| "first": "Chuhan", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Tsinghua University", |
| "location": { |
| "postCode": "100084", |
| "settlement": "Beijing", |
| "country": "China" |
| } |
| }, |
| "email": "wuchuhan15@gmail.com" |
| }, |
| { |
| "first": "Fangzhao", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Microsoft Research Asia", |
| "location": { |
| "postCode": "100080", |
| "settlement": "Beijing", |
| "country": "China" |
| } |
| }, |
| "email": "wufangzhao@gmail.com" |
| }, |
| { |
| "first": "Tao", |
| "middle": [], |
| "last": "Qi", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Tsinghua University", |
| "location": { |
| "postCode": "100084", |
| "settlement": "Beijing", |
| "country": "China" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Yongfeng", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Tsinghua University", |
| "location": { |
| "postCode": "100084", |
| "settlement": "Beijing", |
| "country": "China" |
| } |
| }, |
| "email": "yfhuang@tsinghua.edu.cn" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Clickbait is a form of web content designed to attract attention and entice users to click on specific hyperlinks. The detection of clickbaits is an important task for online platforms to improve the quality of web content and the satisfaction of users. Clickbait detection is typically formed as a binary classification task based on the title and body of a webpage, and existing methods are mainly based on the content of title and the relevance between title and body. However, these methods ignore the stylistic patterns of titles, which can provide important clues on identifying clickbaits. In addition, they do not consider the interactions between the contexts within title and body, which are very important for measuring their relevance for clickbait detection. In this paper, we propose a clickbait detection approach with style-aware title modeling and coattention. Specifically, we use Transformers to learn content representations of title and body, and respectively compute two content-based clickbait scores for title and body based on their representations. In addition, we propose to use a character-level Transformer to learn a style-aware title representation by capturing the stylistic patterns of title, and we compute a title stylistic score based on this representation. Besides, we propose to use a co-attention network to model the relatedness between the contexts within title and body, and further enhance their representations by encoding the interaction information. We compute a title-body matching score based on the representations of title and body enhanced by their interactions. The final clickbait score is predicted by a weighted summation of the aforementioned four kinds of scores. Extensive experiments on two benchmark datasets show that our approach can effectively improve the performance of clickbait detection and consistently outperform many baseline methods.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Clickbait is a form of web content designed to attract attention and entice users to click on specific hyperlinks. The detection of clickbaits is an important task for online platforms to improve the quality of web content and the satisfaction of users. Clickbait detection is typically formed as a binary classification task based on the title and body of a webpage, and existing methods are mainly based on the content of title and the relevance between title and body. However, these methods ignore the stylistic patterns of titles, which can provide important clues on identifying clickbaits. In addition, they do not consider the interactions between the contexts within title and body, which are very important for measuring their relevance for clickbait detection. In this paper, we propose a clickbait detection approach with style-aware title modeling and coattention. Specifically, we use Transformers to learn content representations of title and body, and respectively compute two content-based clickbait scores for title and body based on their representations. In addition, we propose to use a character-level Transformer to learn a style-aware title representation by capturing the stylistic patterns of title, and we compute a title stylistic score based on this representation. Besides, we propose to use a co-attention network to model the relatedness between the contexts within title and body, and further enhance their representations by encoding the interaction information. We compute a title-body matching score based on the representations of title and body enhanced by their interactions. The final clickbait score is predicted by a weighted summation of the aforementioned four kinds of scores. Extensive experiments on two benchmark datasets show that our approach can effectively improve the performance of clickbait detection and consistently outperform many baseline methods.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Clickbait is a type of web content that is designed to attract users' attention and further entice them to click hyperlinks to enter specific webpages, such as news articles, advertisements and videos (Chakraborty et al., 2016) . Several illustrative examples of clickbaits are shown in Fig. 1 . We can see that the title of the first clickbait is written in a sensationalized way by using words with strong emotions like \"MUST\", and the title of the second clickbait is misleading because it does not match the content of the body. Clickbaits are commonly used by online publishers, because clickbaits can draw more attention to the online websites where they are displayed and improve the revenue by attracting more clicks on advertisements (Dong et al., 2019) . However, clickbaits are deceptive to users because the main content of clickbaits is often uninformative, misleading, or even irrelevant to the title, which is extremely harmful for the reading satisfaction of users (Chen et al., 2015) . Thus, clickbait detection is an important task for online platforms to improve the quality of their web content and maintain their brand reputation by improving user experience (Biyani et al., 2016) .", |
| "cite_spans": [ |
| { |
| "start": 201, |
| "end": 227, |
| "text": "(Chakraborty et al., 2016)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 743, |
| "end": 762, |
| "text": "(Dong et al., 2019)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 981, |
| "end": 1000, |
| "text": "(Chen et al., 2015)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 1180, |
| "end": 1201, |
| "text": "(Biyani et al., 2016)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 287, |
| "end": 293, |
| "text": "Fig. 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Many methods formulate clickbait detection as a binary detection task, and they mainly focus on modeling the content of online articles and the relevance between title and body (Zhou, 2017; Dong et al., 2019) . For example, proposed to use a combination of bi-GRU network and attention network to learn representations of tweets posted by users for clickbait detection. Dong et al. (2019) proposed a similarity-aware clickbait detection model, which learns title and body The pandemic has caused a lot of businesses to fold, especially independent restaurants, cafes, and coffee shops. representations via an attentive bi-GRU network, and measures the global and local similarities between these representations for clickbait prediction. However, in these methods the stylistic patterns of titles (e.g., capitalization) are not taken into consideration, which are useful clues for identifying clickbaits (Biyani et al., 2016) . In addition, they cannot model the interactions between the contexts in the title and body, which are important for measuring the title-body relevance for clickbait detection. Our work is motivated by the following observations. First, the content of webpage title and body is important for clickbait detection. For example, in the title of the third webpage in Fig. 1 , the contexts like \"You Won't Believe\" are important indications of clickbaits because they express strong emotions. In addition, the body of this webpage is short and uninformative, which also implies that this webpage is a clickbait. Second, the stylistic patterns of title like the usage of numeric and capitalized characters can also provide useful clues for identifying clickbaits. For example, the title of the first webpage in Fig. 1 starts with a number \"7\" and it uses an all-capital word \"MUST\" to attract attention, both of which are commonly used by clickbaits. Therefore, modeling the stylistic patterns of title can help detect clickbaits more accurately. 
Third, there is inherent relatedness between the contexts within the title and body of the same webpage. For example, the words \"Weight Loss\" in the title of the first webpage in Fig. 1 have close relatedness with the words \"losing\" and \"pounds\" in the body. Modeling these interactions are helpful for measuring the relevance between title and body more accurately.", |
| "cite_spans": [ |
| { |
| "start": 177, |
| "end": 189, |
| "text": "(Zhou, 2017;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 190, |
| "end": 208, |
| "text": "Dong et al., 2019)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 370, |
| "end": 388, |
| "text": "Dong et al. (2019)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 904, |
| "end": 925, |
| "text": "(Biyani et al., 2016)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1290, |
| "end": 1296, |
| "text": "Fig. 1", |
| "ref_id": "FIGREF0" |
| }, |
| { |
| "start": 1732, |
| "end": 1738, |
| "text": "Fig. 1", |
| "ref_id": "FIGREF0" |
| }, |
| { |
| "start": 2147, |
| "end": 2153, |
| "text": "Fig. 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we propose a clickbait detection approach with style-aware title modeling and co-attention (SATC), which can consider the interactions between contexts within title and body as well as the stylistic patterns of title. We first use Transformers to learn representations of title and body based on their content, and then compute a title content score and a body content score based on the representations of title and body, respectively. In addition, we propose to use a character-level Transformer to learn a style-aware title representation by capturing the stylistic patterns in the title, and we further compute a title stylistic score based on this representation. Besides, we propose to use a co-attention network to model the interactions between the contexts within title and body, and further enhance their representations by encoding their interaction information. We compute a title-body matching score based on the relevance between the interaction-enhanced representations of title and body. The final unified clickbait score is a weighted summation of the four kinds of scores, which jointly considers the content of title and body, the stylistic information of title, and the relevance between title and body. Extensive experiments on two benchmark datasets show that our approach can effectively enhance the performance of clickbait detection by incorporating the stylistic patterns of title and the title-body interactions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The main contributions of this paper are summarized as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We propose a style-aware title modeling method to capture the stylistic patterns of title to learn style-aware title representations for clickbait detection.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We propose to use co-attention network to model the interactions between the contexts within title and body to better evaluate their relevance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 Extensive experiments are conducted on two benchmark datasets, and the results validate the effectiveness of our approach in clickbait detection.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Automatic detection of clickbaits is important for online platforms to purify their web content and improve user experience. Traditional clickbait detection methods usually rely on handcrafted features to build representations of webpages (Chen et al., 2015; Biyani et al., 2016; Potthast et al., 2016; Chakraborty et al., 2016; Bourgonje et al., 2017; Cao et al., 2017; Indurthi and Oota, 2017; Ge\u00e7kil et al., 2018) . For example, Chen et al. (2015) proposed to represent news articles with semantic features (e.g., unresolved pronouns, affective words, suspenseful language and overuse numerals), syntax features (e.g., forward reference and reverse narrative) and image features (e.g., image placement and emotional content). In addition, they incorporate users' behaviors on news, like reading time, sharing and commenting, to enhance news representation. They use various classification models like Naive Bayes and SVM to identify clickbaits based on the news and user behavior features. Biyani et al. (2016) proposed to represent webpages using content features like n-gram features extracted from title and body, sentiment polarity features, part-of-speech features and numerals features. They also incorporate the similarities between the TF-IDF features of title and the first 5 sentences in the body. Besides, they consider the informality of title, the use of forward reference, and the URL of webpage as complementary information. They used Gradient Boosted Decision Trees (GBDT) to classify webpages based on their features. Potthast et al. (2016) proposed to detect clickbaits on Twitter. They used features like bag-of-words, image tags, and dictionary matchings to represent tweets, and used bag-of-words, readability and length features to represent the linked webpage. They also incorporated several metadata features like the gender of user. 
They compared several machine learning models including logistic regression, naive Bayes, and random forests for clickbait classification. However, these methods need heavy feature engineering, which depends on a large amount of domain knowledge. In addition, handcrafted features are usually not optimal in representing the textual content of webpages since they cannot effectively model the contexts of words. In recent years, several approaches explore to use deep learning techniques for clickbait detection (Agrawal, 2016; Fu et al., 2017; Zhou, 2017; Thomas, 2017; Dimpas et al., 2017; Anand et al., 2017; Zheng et al., 2018; Dong et al., 2019) . For example, Agrawal et al. (2016) proposed a neural clickbait detection approach, which uses convolutional neural network (CNN) with max pooling techniques to learn representations of titles. proposed to use a bi-GRU network to learn contextual word representations, and use an attention network to select important words for learning informative tweet representations for clickbait detection. proposed to learn title representations with an attentive bi-GRU network, and used two Siamese networks to respectively measure the relevance between the title and body and the relevance between the associated image and body. They combined the title representation and the relevance vectors for final prediction. Dong et al. (2019) proposed a similarity-aware clickbait detection model. They used a combination of bi-GRU network and attention network to learn title and body representations, and computed a similarity vector based on the global and local vector similarities between the representations of titles and bodies. They combined the title and body representations with the similarity vector for clickbait prediction. However, these methods do not consider the stylistic patterns of titles when learning their representations, which are important cues for clickbait detection. 
In addition, they do not consider the interactions between the contexts in the title and body, which are usually important for evaluating their relevance. Different from existing methods, our approach incorporates a character-level Transformer to capture the stylistic patterns of title, which can help recognize clickbaits more accurately. In addition, it can model the interactions between title and body via co-attention to enhance their representations.", |
| "cite_spans": [ |
| { |
| "start": 239, |
| "end": 258, |
| "text": "(Chen et al., 2015;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 259, |
| "end": 279, |
| "text": "Biyani et al., 2016;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 280, |
| "end": 302, |
| "text": "Potthast et al., 2016;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 303, |
| "end": 328, |
| "text": "Chakraborty et al., 2016;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 329, |
| "end": 352, |
| "text": "Bourgonje et al., 2017;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 353, |
| "end": 370, |
| "text": "Cao et al., 2017;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 371, |
| "end": 395, |
| "text": "Indurthi and Oota, 2017;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 396, |
| "end": 416, |
| "text": "Ge\u00e7kil et al., 2018)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 432, |
| "end": 450, |
| "text": "Chen et al. (2015)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 993, |
| "end": 1013, |
| "text": "Biyani et al. (2016)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 1538, |
| "end": 1560, |
| "text": "Potthast et al. (2016)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 2373, |
| "end": 2388, |
| "text": "(Agrawal, 2016;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 2389, |
| "end": 2405, |
| "text": "Fu et al., 2017;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 2406, |
| "end": 2417, |
| "text": "Zhou, 2017;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 2418, |
| "end": 2431, |
| "text": "Thomas, 2017;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 2432, |
| "end": 2452, |
| "text": "Dimpas et al., 2017;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 2453, |
| "end": 2472, |
| "text": "Anand et al., 2017;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 2473, |
| "end": 2492, |
| "text": "Zheng et al., 2018;", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 2493, |
| "end": 2511, |
| "text": "Dong et al., 2019)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 2527, |
| "end": 2548, |
| "text": "Agrawal et al. (2016)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 3222, |
| "end": 3240, |
| "text": "Dong et al. (2019)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this section, we introduce our proposed clickbait detection approach with style-aware title modeling and co-attention (SATC). The framework of our proposed SATC approach is illustrated in Fig. 2 . It consists of four core modules, i.e., a content modeling module to learn representations of title and body from their content, a style modeling module to capture the stylistic patterns in the title, an interaction modeling module to capture the interactions between the contexts within title and body, and a clickbait prediction module to compute the clickbait score. The details of each module are introduced as follows. Figure 2 : The architecture of our SATC approach for clickbait detection.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 191, |
| "end": 197, |
| "text": "Fig. 2", |
| "ref_id": null |
| }, |
| { |
| "start": 624, |
| "end": 632, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Methodology", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The content modeling module is used to learn the representations of title and body from their content. We respectively denote the sequences of words in title and body as", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Content Modeling", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "[w t 1 , w t 2 , ..., w t N ] and [w b 1 , w b 2 , ..., w b P ]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Content Modeling", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": ", where N and P respectively stand for the number of words in the title and body. In this module, we first use a word embedding layer to convert both word sequences into sequences of semantic vectors, which are denoted as", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Content Modeling", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "[w t 1 , w t 2 , ..., w t N ] and [w b 1 , w b 2 , ..., w b P ]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Content Modeling", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": ". Usually the contexts of words in title and body are important for modeling their content. For example, in the title of the first webpage in Fig. 1 , the contexts of the word \"Loss\" such as \"Weight\" and \"Exercise\" are useful clues for understanding that this word is about fitness rather than financial loss. Transformer (Vaswani et al., 2017) is an effective neural architecture for context modeling. Thus, we apply two independent Transformers to learn hidden representations of words in title and body by modeling their contexts. We denote the hidden representation sequences of words in title and body as E t = [e t 1 , e t 2 , ..., e t N ] and E b = [e b 1 , e b 2 , ..., e b P ], respectively. Different words in a title or body may have different importance for modeling the content. For instance, the word \"MUST\" in Fig. 1 is more important than the word \"About\" in learning title representation for clickbait detection. Thus, we apply attention mechanisms (Yang et al., 2016) to select words in the title and body to form unified representations for them (denoted as e t and e b ), which are respectively formulated as follows:", |
| "cite_spans": [ |
| { |
| "start": 322, |
| "end": 344, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 966, |
| "end": 985, |
| "text": "(Yang et al., 2016)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 142, |
| "end": 148, |
| "text": "Fig. 1", |
| "ref_id": "FIGREF0" |
| }, |
| { |
| "start": 825, |
| "end": 831, |
| "text": "Fig. 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Content Modeling", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "e t = Attention([e t 1 , e t 2 , ..., e t N ]),", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Content Modeling", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "e b = Attention([e b 1 , e b 2 , ..., e b P ]).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Content Modeling", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "(2)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Content Modeling", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The style modeling module is used to capture the stylistic patterns in the title to better identify clickbaits. Usually, there are some common patterns on the style of clickbait titles. For example, many clickbaits use all-capital words (e.g., \"MUST\", \"NOT\" and \"THIS\"), exclamation marks, and numeric characters to attract users' attention. Thus, it is very important to grasp these stylistic patterns in clickbait detection. To capture these patterns, we propose to use a character-level Transformer to learn style-aware title representations from its original characters. We denote the character sequence (including whitespace) of the title as [c 1 , c 2 , ..., c M ], where M is the number of characters. We first convert these characters into their embeddings (denoted as [c 1 , c 2 , ..., c M ]) via a character embedding layer, and then use a character Transformer to learn the hidden representations of these characters, which are denoted as [e c 1 , e c 2 , ..., e c M ]. Usually different characters may have different importance in style modeling. For example, in Fig. 1 the character \"7\" is more important than the character \"a\" in the word \"and\". Thus, we use a character-level attention network for character selection in building the style-aware title representation e c , which is formulated as follows:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 1075, |
| "end": 1081, |
| "text": "Fig. 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Style Modeling", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "e c = Attention([e c 1 , e c 2 , ..., e c M ]).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Style Modeling", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "(3)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Style Modeling", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The interaction modeling module is used to capture the interactions between title and body. For most webpages, the contexts in their titles usually have relatedness with the contexts in their bodies to a certain extent. For instance, the words \"Restaurants\" in the title of the third webpage in Fig. 1 have close relatedness with the words \"businesses\", \"restaurants\" and \"cafes\" in the body. These interactions are important cues for modeling the relevance between title and body, which is critical for clickbait detection. Thus, we propose to use a multi-head co-attention network to capture the interactions between title and body. More specifically, we first use the title word representation sequence E t as the query, and use the body word representation sequence E b as the key and value to compute a hidden representation sequence", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 295, |
| "end": 301, |
| "text": "Fig. 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Interaction Modeling", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "H t = [h t 1 , h t 2 , ..., h t N ]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Interaction Modeling", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": ", which summarizes the contexts within body and their interactions with each word in the title. This process is formulated as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Interaction Modeling", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "H t = M ultiHead(E t , E b , E b ).", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "Interaction Modeling", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Next, we use the body word representation sequence E b as the query, and use the title word representation sequence E t as the key and value to compute an hidden representation sequence", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Interaction Modeling", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "H b = [h b 1 , h b 2 , .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Interaction Modeling", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": ".., h b P ] that conveys the contexts in title and their interactions with each word in body, which is formulated as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Interaction Modeling", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "H b = M ultiHead(E b , E t , E t ).", |
| "eq_num": "(5)" |
| } |
| ], |
| "section": "Interaction Modeling", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Then, we use the interactions between title and body to enhance their representations. We add the hidden representation sequence H t to the original word representation sequence E t to form a unified representation sequence R t , i.e., R t = E t + H t . The unified body word representation sequence R b is obtained by R b = E b + H b . Similar to the content modeling module, we also use attention networks to obtain the final interaction-enhanced representations of title and body (denoted as r t and r b ), which are formulated as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Interaction Modeling", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "r t = Attention([r t 1 , r t 2 , ..., r t N ]),", |
| "eq_num": "(6)" |
| } |
| ], |
| "section": "Interaction Modeling", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "r b = Attention([r b 1 , r b 2 , ..., r b P ]),", |
| "eq_num": "(7)" |
| } |
| ], |
| "section": "Interaction Modeling", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "where r t i and r b i stand for the i-th vector in R t and R b , respectively.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Interaction Modeling", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "The clickbait prediction module is used to compute a clickbait score based on the representations of title and body. We first use a dense layer to compute a title content score y t based on the content representation e t of the title, which is formulated as y t = w t e t + b t , where w t and b t are the kernel and bias parameters. We compute a body content score y b based on e b in a similar way, which is formulated as y b = w b e b + b b , where w b and b b are parameters. Next, we use a matcher to compute a title-body matching score, which indicates the relevance between title and body. It takes the interaction-enhanced representations of title and body (r t and r b ) as the input, and outputs the matching score y r . Following (Okura et al., 2017) , we use dot-product to implement the matcher, and the score y r is computed as y r = r t \u2022 r b . Then, we use another dense layer to compute a title stylistic score based on the style-aware title representation e c , which is formulated as y s = w s e s + b s , where w s and b s are parameters. The final clickbait score y is a weighted summation of the aforementioned four scores and we use the sigmoid function for normalization, which is formulated as follows:", |
| "cite_spans": [ |
| { |
| "start": 741, |
| "end": 761, |
| "text": "(Okura et al., 2017)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Clickbait Prediction", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "y = sigmoid(\u03b1 s y s + \u03b1 t y t + \u03b1 r y r + \u03b1 b y b ),", |
| "eq_num": "(8)" |
| } |
| ], |
| "section": "Clickbait Prediction", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "where \u03b1 s , \u03b1 t , \u03b1 r and \u03b1 b are trainable parameters.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Clickbait Prediction", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "For model training, we use binary cross-entropy as the loss function. By comparing the predicted clickbait score with the gold label, we can obtain the loss on the training samples, and further compute the gradients for model update.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Clickbait Prediction", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "Our experiments are conducted on two benchmark datasets for clickbait detection. The first one is Clickbait Challenge 1 , which is a dataset released by the organizers of Clickbait Challenge 2017. This dataset contains the tweet texts posted by users and the content of the corresponding article. Each pair of tweet and article is annotated by 5 judgers, where each judger gives a clickbait score from 0 (non-clickbait) to 1 (clickbait) to this pair. Following (Dong et al., 2019) , we regard the pairs with the mean score over 0.5 as clickbaits. The training set contains 19,538 pairs, and the validation set contains 2,495 pairs. Since the labels of the test set are not released, we evaluate the model on the current validation set, and randomly sample 10% of pairs in the training set for validation. The second one is FNC 2 , which is released by the Fake News Challenge in 2017. In this dataset, each pair of title and body is labeled as \"agree\", \"disagree\", \"discuss\" or \"unrelated\". Following (Dong et al., 2019) , we regard the pairs with \"unrelated\" labels as clickbaits. This dataset contains 49,972 pairs of titles and bodies for training and 25,413 pairs for test. We also use 10% of training samples for validation.", |
| "cite_spans": [ |
| { |
| "start": 461, |
| "end": 480, |
| "text": "(Dong et al., 2019)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 1001, |
| "end": 1020, |
| "text": "(Dong et al., 2019)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset and Experimental Settings", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "In our experiments, we use the pre-trained 300-dimensional Glove embeddings (Pennington et al., 2014) to initialize the parameters in the word embedding layer. We do not fine-tune these pre-trained word embeddings in model training to avoid overfitting. The character embeddings are 50-dimensional. The Transformers have two self-attention layers. Each layer has 8 attention heads, and the output dimension of each head is 32. We apply dropout (Srivastava et al., 2014) to the word and character embeddings at a ratio of 20%. We use Adam (Kingma and Ba, 2014) as the optimizer, and the learning rate is 0.01. The size of each mini-batch is 64. These hyperparameters are searched according to the performance on the validation sets. Each experiment is repeated 5 times, and the average results in terms of accuracy, precision, recall and F-score are reported.", |
| "cite_spans": [ |
| { |
| "start": 76, |
| "end": 101, |
| "text": "(Pennington et al., 2014)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 444, |
| "end": 469, |
| "text": "(Srivastava et al., 2014)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset and Experimental Settings", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We compare our SATC method with several baseline methods, including:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Performance Evaluation", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "\u2022 DSSM (Huang et al., 2013) , deep structured semantic model, where title is regarded as the query and body is regarded as document. The texts of title and body are represented by N-gram features.", |
| "cite_spans": [ |
| { |
| "start": 7, |
| "end": 27, |
| "text": "(Huang et al., 2013)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Performance Evaluation", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "\u2022 CLSM (Shen et al., 2014) , a variant of DSSM that uses CNN to learn text representations;", |
| "cite_spans": [ |
| { |
| "start": 7, |
| "end": 26, |
| "text": "(Shen et al., 2014)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Performance Evaluation", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "\u2022 CNN (Agrawal, 2016; Zheng et al., 2018) , which detects clickbaits solely based on titles. Text-CNN is used to learn title representations.", |
| "cite_spans": [ |
| { |
| "start": 6, |
| "end": 21, |
| "text": "(Agrawal, 2016;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 22, |
| "end": 41, |
| "text": "Zheng et al., 2018)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Performance Evaluation", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "\u2022 LSTM (Glenski et al., 2017) , using LSTM networks to learn title and body representations for clickbait detection.", |
| "cite_spans": [ |
| { |
| "start": 7, |
| "end": 29, |
| "text": "(Glenski et al., 2017)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Performance Evaluation", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "\u2022 GRU-Att (Zhou, 2017) , using a combination of bi-GRU network and attention network to learn title representations for clickbait detection. \u2022 SiameseNet , which uses GRU-Att to learn title representations and uses Siamese networks to capture the relevance between title and body.", |
| "cite_spans": [ |
| { |
| "start": 10, |
| "end": 22, |
| "text": "(Zhou, 2017)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Performance Evaluation", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "\u2022 LSDA (Dong et al., 2019) , which uses GRU-Att to learn title and body representations, and measures their relevance using the global and local similarities between the representation vectors of title and body.", |
| "cite_spans": [ |
| { |
| "start": 7, |
| "end": 26, |
| "text": "(Dong et al., 2019)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": null |
| }, |
| { |
| "text": "The results on the two datasets are summarized in Table 1 . 3 According to the results, we have several main findings. First, the methods that use neural networks to learn text representations (e.g., CNN, LSTM, GRU-Att and SATC) outperform the DSSM method that uses handcrafted features for text representation. It shows that handcrafted features are usually not optimal in representing the textual content of webpages for clickbait detection. Second, the methods based on attention mechanisms (e.g., GRU-Att and LSDA) usually outperform the methods without attention (e.g., CNN and LSTM). This is probably because attention mechanism can select important contexts within title and body to learn more informative representations for them, which is beneficial for clickbait detection. Third, our approach can consistently outperform the compared baseline methods. This is because our approach can capture the stylistic patterns in the title to learn style-aware title representations, and meanwhile can model the interactions between contexts in title and body to help measure their relevance more accurately. In addition, Transformers may also have a greater ability than CNN, LSTM and GRU in context modeling. Thus, our method can detect clickbaits more effectively than baseline methods.", |
| "cite_spans": [ |
| { |
| "start": 60, |
| "end": 61, |
| "text": "3", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 50, |
| "end": 57, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": null |
| }, |
| { |
| "text": "In this section, we conduct several ablation studies to explore the influence of the four clickbait scores. We compare the performance of our SATC approach by removing one of these scores in clickbait prediction. The results on the Clickbait Challenge and FNC datasets are respectively shown in Figs. 3(a) and 3(b) . From the results, we find that the title content score plays the most important role. This is intuitive because clickbaits mainly rely on the content of their titles to attract users' attention and clicks. Thus, modeling the title content is critical for clickbait detection. In addition, we find the body content score is also important. This is because the body of many clickbaits may be misleading or uninformative. Thus, modeling the content of body is important for clickbait detection. Besides, the matching score is also useful for clickbait prediction. This is probably because the titles of some clickbaits do not perfectly match their bodies. Thus, modeling the relevance of title and body is useful for accurate clickbait detection. Moreover, we find the title stylistic score is also helpful. This is mainly because the stylistic patterns of title are important clues for identifying clickbaits, but these clues may not be captured by the content modeling module. Thus, the title stylistic score can provide complementary information to help detect clickbaits better. These results verify the effectiveness of the four different clickbait scores in our approach. ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 295, |
| "end": 314, |
| "text": "Figs. 3(a) and 3(b)", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Influence of Different Scores", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "In this section, we verify the effectiveness of the word-level attention, character-level attention and co-attention networks in our approach. More specifically, we compare the performance of our SATC approach and its variants without one kind of attention. The results on the Clickbait Challenge and FNC datasets are respectively shown in Figs. 4(a) and 4(b). We find that the word-level attention network is very helpful. This may be because different words are usually diverse in their informativeness and the word-level attention networks can attend to the important words in title and body, which can help learn more informative representations of them. In addition, the co-attention network can also effectively improve the model performance. This may be because the co-attention network can model the interactions of words in title and body and can further enhance the title and body representations by encoding interaction information, which is beneficial for evaluating the relevance between title and body. Besides, the character-level attention network can also improve the performance to some extent. This may be because different characters also have different importance in modeling the stylistic patterns of the title and the character-level attention network is able to select useful characters, which can help learn more informative style-aware title representations.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 340, |
| "end": 350, |
| "text": "Figs. 4(a)", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Effectiveness of Attention Mechanism", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "In this section, we verify the effectiveness of Transformers in text modeling in our approach. We compare the performance of SATC and its several variants using CNN, LSTM and GRU for text modeling, and the results are illustrated in Figs. 5(a) and 5(b). From the results, we find that using CNN is not optimal in text modeling for clickbait detection. This is because CNN can only capture local contexts, while the long-distance contexts are not considered. In addition, we find GRU slightly outperforms LSTM. This may be because the GRU networks contain fewer parameters and have a lower risk of overfitting. Besides, Transformer outperforms LSTM and GRU. This is because Transformer is very effective in modeling the relations between contexts, which has also been validated by existing works (Vaswani et al., 2017) . Thus, we prefer Transformer for learning text representations for clickbait detection.", |
| "cite_spans": [ |
| { |
| "start": 795, |
| "end": 817, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Effectiveness of Transformer", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "In this section, we explore the influence of using different methods to implement the matcher in our approach to compute the matching score. We compare the performance of SATC using dot-product, dense network and cosine similarity as the matcher. The results are illustrated in Figs. 6(a) and 6(b). From the results, we find that using a dense network is not optimal. According to (Rendle et al., 2020), a possible reason is that it is difficult for a dense network to measure the similarity between two vectors, and thereby the matching score may be inaccurate. In addition, we find that using dot-product is slightly better than using cosine similarity. This may be because the cosine similarity function is not sensitive to the length of the input vectors, which may not be optimal for measuring the relevance between the title and body. Thus, we choose dot-product to implement the matcher in our method.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Influence of Matching Methods", |
| "sec_num": "4.6" |
| }, |
| { |
| "text": "In this section, we conduct several case studies to better understand the characteristics of our approach. The title, body, groundtruth and the predictions results of GRU-Att, LSDA and our SATC on several samples are shown in Table 2 , and we have several findings. In Table 2 , the first sample is a clickbait because its title does not match its body. However, since the GRU-Att method only considers the information of title, it fails to detect this clickbait. The other two methods that consider the relevance between title and body classify this sample correctly. Thus, it is important to model the title-body relevance for clickbait detection. The title of the second sample in Table 2 contains a word with repeated characters to express strong emotion, which is an important indication of clickbaits. However, this word is out-of-vocabulary, making it difficult for the GRU-Att and LSDA methods to capture this clue. Thus, these methods fail to detect this clickbait. Different from them, our approach uses a character-level Transformer to capture the stylistic patterns in the title, and thereby can detect this clickbait at a high confidence. The third sample in Table 2 is not a clickbait because the title is formal and the title is relevant to the body. However, it is not easy to measure the relevance between the title and body of this sample without considering the interactions between their words, since the body does not frequently mention the words like \"US\" and \"Watch\" that appear in the title. Thus, the LSDA method, which does not consider the interactions between contexts, incorrectly classifies this sample as a clickbait. Since our approach uses a co-attention network to model title-body interactions, it classifies this sample correctly.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 226, |
| "end": 233, |
| "text": "Table 2", |
| "ref_id": "TABREF4" |
| }, |
| { |
| "start": 269, |
| "end": 276, |
| "text": "Table 2", |
| "ref_id": "TABREF4" |
| }, |
| { |
| "start": 684, |
| "end": 691, |
| "text": "Table 2", |
| "ref_id": "TABREF4" |
| }, |
| { |
| "start": 1172, |
| "end": 1179, |
| "text": "Table 2", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Case Study", |
| "sec_num": "4.7" |
| }, |
| { |
| "text": "In this paper, we propose a clickbait detection approach with style-aware title modeling and co-attention, which can capture the stylistic patterns in the title and the interactions between the contexts in the title and body. We use Transformers to learn content representations of title and body, and respectively compute two content-based clickbait scores for them based on their representations. In addition, we propose to apply a character-level Transformer to capture the stylistic patterns of title for learning style-aware title representations, which are further used to compute a title stylistic score. Besides, we propose to use a co-attention network to model the relatedness between the contexts within title and body, and further combine their original representations with the interaction information to learn interaction-enhanced title and body representations, which are further used to compute a title-body matching score. The final clickbait score is predicted by a weighted summation of the four kinds of clickbait scores. Extensive experiments on two benchmark datasets show that our approach can effectively improve the performance of clickbait detection by using style-aware title modeling to capture stylistic information and co-attention networks to model title-body interactions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "https://www.clickbait-challenge.org/. 2 http://www.fakenewschallenge.org/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Most results of baselines are taken from(Dong et al., 2019), except the result of Siamese Net on the Clickbait Challenge dataset since it is quite unsatisfactory. We report the results using our implementation instead.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work was supported by the National Key Research and Development Program of China under Grant number 2018YFC1604002, the National Natural Science Foundation of China under Grant numbers U1936208, U1936216, U1836204, and U1705261.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Clickbait detection using deep learning", |
| "authors": [ |
| { |
| "first": "Amol", |
| "middle": [], |
| "last": "Agrawal", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "2016 2nd International Conference on Next Generation Computing Technologies (NGCT)", |
| "volume": "", |
| "issue": "", |
| "pages": "268--272", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Amol Agrawal. 2016. Clickbait detection using deep learning. In 2016 2nd International Conference on Next Generation Computing Technologies (NGCT), pages 268-272. IEEE.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "We used neural networks to detect clickbaits: You won't believe what happened next! In ECIR", |
| "authors": [ |
| { |
| "first": "Ankesh", |
| "middle": [], |
| "last": "Anand", |
| "suffix": "" |
| }, |
| { |
| "first": "Tanmoy", |
| "middle": [], |
| "last": "Chakraborty", |
| "suffix": "" |
| }, |
| { |
| "first": "Noseong", |
| "middle": [], |
| "last": "Park", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "541--547", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ankesh Anand, Tanmoy Chakraborty, and Noseong Park. 2017. We used neural networks to detect clickbaits: You won't believe what happened next! In ECIR, pages 541-547. Springer.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "8 amazing secrets for getting more clicks\": Detecting clickbaits in news streams using article informality", |
| "authors": [ |
| { |
| "first": "Prakhar", |
| "middle": [], |
| "last": "Biyani", |
| "suffix": "" |
| }, |
| { |
| "first": "Kostas", |
| "middle": [], |
| "last": "Tsioutsiouliklis", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Blackmer", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "AAAI", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Prakhar Biyani, Kostas Tsioutsiouliklis, and John Blackmer. 2016. \" 8 amazing secrets for getting more clicks\": Detecting clickbaits in news streams using article informality. In AAAI.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "From clickbait to fake news detection: an approach based on detecting the stance of headlines to articles", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Bourgonje", |
| "suffix": "" |
| }, |
| { |
| "first": "Julian", |
| "middle": [ |
| "Moreno" |
| ], |
| "last": "Schneider", |
| "suffix": "" |
| }, |
| { |
| "first": "Georg", |
| "middle": [], |
| "last": "Rehm", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2017 EMNLP Workshop: Natural Language Processing meets Journalism", |
| "volume": "", |
| "issue": "", |
| "pages": "84--89", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Bourgonje, Julian Moreno Schneider, and Georg Rehm. 2017. From clickbait to fake news detection: an approach based on detecting the stance of headlines to articles. In Proceedings of the 2017 EMNLP Workshop: Natural Language Processing meets Journalism, pages 84-89.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Machine learning based detection of clickbait posts in social media", |
| "authors": [ |
| { |
| "first": "Xinyue", |
| "middle": [], |
| "last": "Cao", |
| "suffix": "" |
| }, |
| { |
| "first": "Thai", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1710.01977" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xinyue Cao, Thai Le, et al. 2017. Machine learning based detection of clickbait posts in social media. arXiv preprint arXiv:1710.01977.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Stop clickbait: Detecting and preventing clickbaits in online news media", |
| "authors": [ |
| { |
| "first": "Abhijnan", |
| "middle": [], |
| "last": "Chakraborty", |
| "suffix": "" |
| }, |
| { |
| "first": "Bhargavi", |
| "middle": [], |
| "last": "Paranjape", |
| "suffix": "" |
| }, |
| { |
| "first": "Sourya", |
| "middle": [], |
| "last": "Kakarla", |
| "suffix": "" |
| }, |
| { |
| "first": "Niloy", |
| "middle": [], |
| "last": "Ganguly", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "2016 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM)", |
| "volume": "", |
| "issue": "", |
| "pages": "9--16", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Abhijnan Chakraborty, Bhargavi Paranjape, Sourya Kakarla, and Niloy Ganguly. 2016. Stop clickbait: Detecting and preventing clickbaits in online news media. In 2016 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM), pages 9-16. IEEE.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Misleading online content: recognizing clickbait as\" false news", |
| "authors": [ |
| { |
| "first": "Yimin", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Victoria", |
| "middle": [ |
| "L" |
| ], |
| "last": "Conroy", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Rubin", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 ACM on workshop on multimodal deception detection", |
| "volume": "", |
| "issue": "", |
| "pages": "15--19", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yimin Chen, Niall J Conroy, and Victoria L Rubin. 2015. Misleading online content: recognizing clickbait as\" false news\". In Proceedings of the 2015 ACM on workshop on multimodal deception detection, pages 15-19.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Filipino and english clickbait detection using a long short term memory recurrent neural network", |
| "authors": [ |
| { |
| "first": "Royce", |
| "middle": [ |
| "Vincent" |
| ], |
| "last": "Philogene Kyle Dimpas", |
| "suffix": "" |
| }, |
| { |
| "first": "Mary", |
| "middle": [ |
| "Jane" |
| ], |
| "last": "Po", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Sabellano", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "In IALP", |
| "volume": "", |
| "issue": "", |
| "pages": "276--280", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philogene Kyle Dimpas, Royce Vincent Po, and Mary Jane Sabellano. 2017. Filipino and english clickbait detection using a long short term memory recurrent neural network. In IALP, pages 276-280. IEEE.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Similarity-aware deep attentive model for clickbait detection", |
| "authors": [ |
| { |
| "first": "Manqing", |
| "middle": [], |
| "last": "Dong", |
| "suffix": "" |
| }, |
| { |
| "first": "Lina", |
| "middle": [], |
| "last": "Yao", |
| "suffix": "" |
| }, |
| { |
| "first": "Xianzhi", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Boualem", |
| "middle": [], |
| "last": "Benatallah", |
| "suffix": "" |
| }, |
| { |
| "first": "Chaoran", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "PAKDD", |
| "volume": "", |
| "issue": "", |
| "pages": "56--69", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Manqing Dong, Lina Yao, Xianzhi Wang, Boualem Benatallah, and Chaoran Huang. 2019. Similarity-aware deep attentive model for clickbait detection. In PAKDD, pages 56-69. Springer.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "A convolutional neural network for clickbait detection", |
| "authors": [ |
| { |
| "first": "Junfeng", |
| "middle": [], |
| "last": "Fu", |
| "suffix": "" |
| }, |
| { |
| "first": "Liang", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xin", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Jinkun", |
| "middle": [], |
| "last": "Zheng", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "2017 4th International Conference on Information Science and Control Engineering (ICISCE)", |
| "volume": "", |
| "issue": "", |
| "pages": "6--10", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Junfeng Fu, Liang Liang, Xin Zhou, and Jinkun Zheng. 2017. A convolutional neural network for clickbait detection. In 2017 4th International Conference on Information Science and Control Engineering (ICISCE), pages 6-10. IEEE.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "A clickbait detection method on news sites", |
| "authors": [ |
| { |
| "first": "Ayse", |
| "middle": [], |
| "last": "Ge\u00e7kil", |
| "suffix": "" |
| }, |
| { |
| "first": "Ahmet", |
| "middle": [ |
| "Anil" |
| ], |
| "last": "M\u00fcngen", |
| "suffix": "" |
| }, |
| { |
| "first": "Esra", |
| "middle": [], |
| "last": "G\u00fcndogan", |
| "suffix": "" |
| }, |
| { |
| "first": "Mehmet", |
| "middle": [], |
| "last": "Kaya", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "2018 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM)", |
| "volume": "", |
| "issue": "", |
| "pages": "932--937", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ayse Ge\u00e7kil, Ahmet Anil M\u00fcngen, Esra G\u00fcndogan, and Mehmet Kaya. 2018. A clickbait detection method on news sites. In 2018 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM), pages 932-937. IEEE.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Fishing for clickbaits in social images and texts with linguistically-infused neural network models", |
| "authors": [ |
| { |
| "first": "Maria", |
| "middle": [], |
| "last": "Glenski", |
| "suffix": "" |
| }, |
| { |
| "first": "Ellyn", |
| "middle": [], |
| "last": "Ayton", |
| "suffix": "" |
| }, |
| { |
| "first": "Dustin", |
| "middle": [], |
| "last": "Arendt", |
| "suffix": "" |
| }, |
| { |
| "first": "Svitlana", |
| "middle": [], |
| "last": "Volkova", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1710.06390" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Maria Glenski, Ellyn Ayton, Dustin Arendt, and Svitlana Volkova. 2017. Fishing for clickbaits in social images and texts with linguistically-infused neural network models. arXiv preprint arXiv:1710.06390.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Learning deep structured semantic models for web search using clickthrough data", |
| "authors": [ |
| { |
| "first": "Po-Sen", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodong", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Deng", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Acero", |
| "suffix": "" |
| }, |
| { |
| "first": "Larry", |
| "middle": [], |
| "last": "Heck", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "CIKM", |
| "volume": "", |
| "issue": "", |
| "pages": "2333--2338", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Po-Sen Huang, Xiaodong He, Jianfeng Gao, Li Deng, Alex Acero, and Larry Heck. 2013. Learning deep struc- tured semantic models for web search using clickthrough data. In CIKM, pages 2333-2338.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Clickbait detection using word embeddings", |
| "authors": [ |
| { |
| "first": "Vijayasaradhi", |
| "middle": [], |
| "last": "Indurthi", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Subba Reddy Oota", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1710.02861" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vijayasaradhi Indurthi and Subba Reddy Oota. 2017. Clickbait detection using word embeddings. arXiv preprint arXiv:1710.02861.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Adam: A method for stochastic optimization", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Diederik", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1412.6980" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik P Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Identifying clickbait: A multi-strategy approach using neural networks", |
| "authors": [ |
| { |
| "first": "Vaibhav", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| }, |
| { |
| "first": "Dhruv", |
| "middle": [], |
| "last": "Khattar", |
| "suffix": "" |
| }, |
| { |
| "first": "Siddhartha", |
| "middle": [], |
| "last": "Gairola", |
| "suffix": "" |
| }, |
| { |
| "first": "Yash", |
| "middle": [], |
| "last": "Kumar Lal", |
| "suffix": "" |
| }, |
| { |
| "first": "Vasudeva", |
| "middle": [], |
| "last": "Varma", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "SIGIR", |
| "volume": "", |
| "issue": "", |
| "pages": "1225--1228", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vaibhav Kumar, Dhruv Khattar, Siddhartha Gairola, Yash Kumar Lal, and Vasudeva Varma. 2018. Identifying clickbait: A multi-strategy approach using neural networks. In SIGIR, pages 1225-1228.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Embedding-based news recommendation for millions of users", |
| "authors": [ |
| { |
| "first": "Shumpei", |
| "middle": [], |
| "last": "Okura", |
| "suffix": "" |
| }, |
| { |
| "first": "Yukihiro", |
| "middle": [], |
| "last": "Tagami", |
| "suffix": "" |
| }, |
| { |
| "first": "Shingo", |
| "middle": [], |
| "last": "Ono", |
| "suffix": "" |
| }, |
| { |
| "first": "Akira", |
| "middle": [], |
| "last": "Tajima", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "KDD", |
| "volume": "", |
| "issue": "", |
| "pages": "1933--1942", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shumpei Okura, Yukihiro Tagami, Shingo Ono, and Akira Tajima. 2017. Embedding-based news recommendation for millions of users. In KDD, pages 1933-1942.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Glove: Global vectors for word representation", |
| "authors": [ |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Pennington", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "1532--1543", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeffrey Pennington, Richard Socher, and Christopher Manning. 2014. Glove: Global vectors for word representa- tion. In EMNLP, pages 1532-1543.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Clickbait detection", |
| "authors": [ |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Potthast", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "K\u00f6psel", |
| "suffix": "" |
| }, |
| { |
| "first": "Benno", |
| "middle": [], |
| "last": "Stein", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthias", |
| "middle": [], |
| "last": "Hagen", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "ECIR", |
| "volume": "", |
| "issue": "", |
| "pages": "810--817", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Martin Potthast, Sebastian K\u00f6psel, Benno Stein, and Matthias Hagen. 2016. Clickbait detection. In ECIR, pages 810-817. Springer.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Neural collaborative filtering vs. matrix factorization revisited", |
| "authors": [ |
| { |
| "first": "Walid", |
| "middle": [], |
| "last": "Steffen Rendle", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Krichene", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Anderson", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2005.09683" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Steffen Rendle, Walid Krichene, Li Zhang, and John Anderson. 2020. Neural collaborative filtering vs. matrix factorization revisited. arXiv preprint arXiv:2005.09683.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "A latent semantic model with convolutional-pooling structure for information retrieval", |
| "authors": [ |
| { |
| "first": "Yelong", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodong", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Deng", |
| "suffix": "" |
| }, |
| { |
| "first": "Gr\u00e9goire", |
| "middle": [], |
| "last": "Mesnil", |
| "suffix": "" |
| }, |
| { |
| "first": ";", |
| "middle": [], |
| "last": "Geoffrey", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Hinton", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Krizhevsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruslan", |
| "middle": [], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "CIKM", |
| "volume": "15", |
| "issue": "", |
| "pages": "1929--1958", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yelong Shen, Xiaodong He, Jianfeng Gao, Li Deng, and Gr\u00e9goire Mesnil. 2014. A latent semantic model with convolutional-pooling structure for information retrieval. In CIKM, pages 101-110. Computational Linguistics Nitish Srivastava, Geoffrey E Hinton, Alex Krizhevsky, Ilya Sutskever, and Ruslan Salakhutdinov. 2014. Dropout: a simple way to prevent neural networks from overfitting. JMLR, 15(1):1929-1958.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Clickbait identification using neural networks", |
| "authors": [ |
| { |
| "first": "Philippe", |
| "middle": [], |
| "last": "Thomas", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1710.08721" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philippe Thomas. 2017. Clickbait identification using neural networks. arXiv preprint arXiv:1710.08721.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "\u0141ukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "NIPS", |
| "volume": "", |
| "issue": "", |
| "pages": "5998--6008", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In NIPS, pages 5998-6008.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Hierarchical attention networks for document classification", |
| "authors": [ |
| { |
| "first": "Zichao", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Diyi", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "NAACL-HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "1480--1489", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zichao Yang, Diyi Yang, Chris Dyer, Xiaodong He, Alex Smola, and Eduard Hovy. 2016. Hierarchical attention networks for document classification. In NAACL-HLT, pages 1480-1489.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Clickbait convolutional neural network. Symmetry", |
| "authors": [ |
| { |
| "first": "Hai-Tao", |
| "middle": [], |
| "last": "Zheng", |
| "suffix": "" |
| }, |
| { |
| "first": "Jin-Yuan", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Xin", |
| "middle": [], |
| "last": "Yao", |
| "suffix": "" |
| }, |
| { |
| "first": "Arun", |
| "middle": [], |
| "last": "Kumar Sangaiah", |
| "suffix": "" |
| }, |
| { |
| "first": "Yong", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Cong-Zhi", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "10", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hai-Tao Zheng, Jin-Yuan Chen, Xin Yao, Arun Kumar Sangaiah, Yong Jiang, and Cong-Zhi Zhao. 2018. Clickbait convolutional neural network. Symmetry, 10(5):138.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Clickbait detection in tweets using self-attentive network", |
| "authors": [ |
| { |
| "first": "Yiwei", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1710.05364" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yiwei Zhou. 2017. Clickbait detection in tweets using self-attentive network. arXiv preprint arXiv:1710.05364.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "num": null, |
| "type_str": "figure", |
| "text": "Several illustrative examples of clickbaits." |
| }, |
| "FIGREF2": { |
| "uris": null, |
| "num": null, |
| "type_str": "figure", |
| "text": "Influence of removing different scores in clickbait prediction." |
| }, |
| "FIGREF3": { |
| "uris": null, |
| "num": null, |
| "type_str": "figure", |
| "text": "Effectiveness of different attention networks." |
| }, |
| "FIGREF5": { |
| "uris": null, |
| "num": null, |
| "type_str": "figure", |
| "text": "Effectiveness of Transformer in text modeling." |
| }, |
| "FIGREF6": { |
| "uris": null, |
| "num": null, |
| "type_str": "figure", |
| "text": "Influence of using different methods for computing matching scores." |
| }, |
| "TABREF0": { |
| "num": null, |
| "type_str": "table", |
| "content": "<table><tr><td>Download our app today</td></tr><tr><td>and get what you want!</td></tr><tr><td>Consider joining this</td></tr><tr><td>community as a helpful</td></tr><tr><td>resource...</td></tr></table>", |
| "html": null, |
| "text": "Title 7 Things You MUST Know About Exercise and Weight Loss Covid-19 news in your area You Won't Believe How Many Beloved Mom-and-Pop Restaurants are Closing Body The biggest challenge for an obese person is losing a few extra pounds. Well, people sometimes let themselves eat what they like..." |
| }, |
| "TABREF3": { |
| "num": null, |
| "type_str": "table", |
| "content": "<table><tr><td>Title</td><td>Body</td><td>Label</td><td colspan=\"3\">Prediction GRU-Att LSDA SATC</td></tr><tr><td>Report: NHL expansion to Las Vegas'a done deal'</td><td/><td>1</td><td>0.07</td><td>0.88</td><td>0.95</td></tr><tr><td>The real-life Indiana Jane will make you soooooooooo jealous of her life</td><td>Meet the real-life Indiana Jane: wildernesses... American adventurer spends her life in dangerous jungles and uncharted</td><td>1</td><td>0.23</td><td>0.16</td><td>0.98</td></tr><tr><td/><td>Lately, Apple CEO has been making the</td><td/><td/><td/><td/></tr><tr><td>Apple Watch may be available outside US shortly after launch</td><td>rounds in Europe, stopping at various The last time we heard anything about stores and chatting with employees.</td><td>0</td><td>0.12</td><td>0.68</td><td>0.05</td></tr><tr><td/><td>his commentary on Apple Watch...</td><td/><td/><td/><td/></tr></table>", |
| "html": null, |
| "text": "Brain surgery recovery can be a gamble, but not everybody wakes up in the middle of the procedure..." |
| }, |
| "TABREF4": { |
| "num": null, |
| "type_str": "table", |
| "content": "<table/>", |
| "html": null, |
| "text": "The titles, bodies, labels and the predicted scores of different methods on several samples. 0 stands for non-clickbait and 1 stands for clickbait." |
| } |
| } |
| } |
| } |