| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T06:35:08.819498Z" |
| }, |
| "title": "\"A Little Birdie Told Me ... \" - Inductive Biases for Rumour Stance Detection on Social Media", |
| "authors": [ |
| { |
| "first": "Karthik", |
| "middle": [], |
| "last": "Radhakrishnan", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Language Technologies Institute Carnegie Mellon University", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Tushar", |
| "middle": [], |
| "last": "Kanakagiri", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Language Technologies Institute Carnegie Mellon University", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Sharanya", |
| "middle": [], |
| "last": "Chakravarthy", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Language Technologies Institute Carnegie Mellon University", |
| "location": {} |
| }, |
| "email": "sharanyc@cs.cmu.edu" |
| }, |
| { |
| "first": "Vidhisha", |
| "middle": [], |
| "last": "Balachandran", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Language Technologies Institute Carnegie Mellon University", |
| "location": {} |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "The rise in the usage of social media has placed it in a central position for news dissemination and consumption. This greatly increases the potential for proliferation of rumours and misinformation. In an effort to mitigate the spread of rumours, we tackle the related task of identifying the stance (Support, Deny, Query, Comment) of a social media post. Unlike previous works (Fajcik et al., 2019; Yang et al., 2019), we impose inductive biases that capture platform specific user behavior. These biases, coupled with social media finetuning of BERT allow for better language understanding, thus yielding an F 1 score of 58.7 on the SemEval 2019 task on rumour stance detection.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "The rise in the usage of social media has placed it in a central position for news dissemination and consumption. This greatly increases the potential for proliferation of rumours and misinformation. In an effort to mitigate the spread of rumours, we tackle the related task of identifying the stance (Support, Deny, Query, Comment) of a social media post. Unlike previous works (Fajcik et al., 2019; Yang et al., 2019), we impose inductive biases that capture platform specific user behavior. These biases, coupled with social media finetuning of BERT allow for better language understanding, thus yielding an F 1 score of 58.7 on the SemEval 2019 task on rumour stance detection.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Social media has seen an exponential growth, replacing traditional news sources as the primary news source. The apparent value of interesting truth-like news and the ease of access to such news jointly make social media a hotbed for rumours, misinformation and fake news. In the absence of an authority to verify or debunk a rumour, social media users often share their own thoughts on its veracity, creating a collaborative inter-subjective sense-making to determine the veracity of the rumour. Hence, an important step in achieving the objective of veracity detection is tracking how other users opine on the accuracy of the rumourous story (Zubiaga et al., 2018) .", |
| "cite_spans": [ |
| { |
| "start": 643, |
| "end": 665, |
| "text": "(Zubiaga et al., 2018)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The content on these social media platforms vary in their topics, style, sentiments, and structure (Manikonda et al.) . Reddit, for example, is used for gathering a comprehensive view of opinions from users in a short period of time. On the other hand, an event on Twitter is alive for a longer duration and is used for following the development and evolution of an event (Priya et al., 2019 ).", |
| "cite_spans": [ |
| { |
| "start": 99, |
| "end": 117, |
| "text": "(Manikonda et al.)", |
| "ref_id": null |
| }, |
| { |
| "start": 372, |
| "end": 391, |
| "text": "(Priya et al., 2019", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "It is also important to effectively utilize the context surrounding a particular tweet and model the exchanges in a conversation as they often contain crucial background information.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "* Equal contribution", |
| "sec_num": null |
| }, |
| { |
| "text": "Given the extensive usage of sarcasm and rhetoric in expressing opinions (Carvalho et al.) , understanding 'social media' style of text is also essential for effective stance identification.", |
| "cite_spans": [ |
| { |
| "start": 73, |
| "end": 90, |
| "text": "(Carvalho et al.)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "* Equal contribution", |
| "sec_num": null |
| }, |
| { |
| "text": "In this work, we impose inductive biases accounting for the underlying social media platform, conversational context, and noisy social media style of text to improve on the task of rumour detection. To the best extent of our knowledge, this is the first work that applies inductive biases inspired from a deep analysis of communities and their usage patterns to the task of stance identification. We achieve a Macro F 1 score of 58.7 on the 2019 Se-mEval RumourEval task with novel techniques that surpass state of the art models (non-ensemble) by 2 F 1 . The code for our approaches will be made available on GitHub 1 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "* Equal contribution", |
| "sec_num": null |
| }, |
| { |
| "text": "To judge the veracity of a social media post, it is useful to analyze the surrounding discourse (comments/replies) by other users. The discourse is initiated by a SOURCE post and followed by treestructured threads. Each post in a thread is made in response to a PARENT post that immediately precedes it. This problem was modeled as a SemEval shared task -RUMOUREVAL (Gorrell et al., 2019) , consisting of two subtasks.", |
| "cite_spans": [ |
| { |
| "start": 366, |
| "end": 388, |
| "text": "(Gorrell et al., 2019)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task Definition", |
| "sec_num": "2" |
| }, |
| { |
| "text": "A. Stance Classification -Given a source post introducing a rumour and the ensuing conversation thread, classify the source and each post in the thread into one of 4 categories.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task Definition", |
| "sec_num": "2" |
| }, |
| { |
| "text": "\u2022 SUPPORT : The author of the response supports the veracity of the rumour.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task Definition", |
| "sec_num": "2" |
| }, |
| { |
| "text": "\u2022 DENY : The author of the response denies the veracity of the rumour. \u2022 QUERY : The author of the response asks for additional evidence in relation to the veracity of the rumour. \u2022 COMMENT : The author of the response makes their own comment without a clear contribution to assessing the veracity of the rumour.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task Definition", |
| "sec_num": "2" |
| }, |
| { |
| "text": "B. Veracity Prediction -Classify the rumour as TRUE, UNVERIFIED, or FALSE.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task Definition", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In this work, we focus on the Stance Classification (highlighted in Appendix A). The heavy class imbalance (highlighted in Appendix B) coupled with the low inter-annotator agreement (\u21e063 %) (Derczynski et al., 2017 ) makes this a challenging task.", |
| "cite_spans": [ |
| { |
| "start": 190, |
| "end": 214, |
| "text": "(Derczynski et al., 2017", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task Definition", |
| "sec_num": "2" |
| }, |
| { |
| "text": "This section highlights prior work on the Ru-mourEval dataset. Table 1 provides a short summary of each of the models analysed below.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 63, |
| "end": 70, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Description BranchLSTM (Kochkina et al., 2017) LSTM-based stance prediction using tweet branches BLCU (Yang et al., 2019) Inference-chain based GPT with word and tweet features BUT-FIT (Fajcik et al., 2019) BERT ensemble for stance classification, without hand-crafted features EventAI (Li et al., 2019) Ensemble of ML and Rulebased models with extensive feature engineering ", |
| "cite_spans": [ |
| { |
| "start": 23, |
| "end": 46, |
| "text": "(Kochkina et al., 2017)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 102, |
| "end": 121, |
| "text": "(Yang et al., 2019)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 185, |
| "end": 206, |
| "text": "(Fajcik et al., 2019)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 286, |
| "end": 303, |
| "text": "(Li et al., 2019)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": null |
| }, |
| { |
| "text": "EventAI, BranchLSTM and BLCU employed extensive feature engineering as described below.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Engineering", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "\u2022 Lexicon Based: BranchLSTM utilized the count of negation words and swear words. BLCU made more extensive use of lexicons and looked for the presence of positive and negative words, swear words, query words and different classes of verbs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Engineering", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "\u2022 Relation To Other Posts: BranchLSTM and EventAI used cosine similarity between source and target embeddings. BLCU used the depth of the post in the thread. \u2022 Content Based: BLCU checked for the presence of punctuations, hashtags, URLs and \"RT\". EventAI used similar features along with mentions of special accounts and hashtags (@cnn, #fakenews etc). \u2022 Tweet Role: BranchLSTM and EventAI had features indicating whether the tweet was a source or a reply. \u2022 Tweet and User Metadata: BLCU used tweet and user features such as favorite and retweet counts, follower and friend counts etc.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Engineering", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Since unsupervised pre-training for word representations has demonstrated success on a large variety of NLP tasks, the top performing models use pretrained contextual word representations. BUT-FIT uses BERT (Devlin et al., 2019) , BLCU uses GPT (Radford, 2018) and CLEARumor (Baris et al., 2019) uses ELMo (Peters et al., 2018) . However, since none of these models are trained on Twitter/Reddit data, fine-tuning on social media data might help capture its idiosyncrasies such as usage of emoticons, opinion-centric text as opposed to fact-centric text, shorter sentences etc.", |
| "cite_spans": [ |
| { |
| "start": 207, |
| "end": 228, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 245, |
| "end": 260, |
| "text": "(Radford, 2018)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 275, |
| "end": 295, |
| "text": "(Baris et al., 2019)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 306, |
| "end": 327, |
| "text": "(Peters et al., 2018)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pre-training", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "BranchLSTM and EventAI used similarity of the target post with other parts of the thread as features. Additionally, BranchLSTM treated a conversation thread as a set of linear branches. They defined a branch as a chain of tweets that included a leaf post and all its parents all the way to the source post. BLCU utilized the entire conversation thread by concatenating it with the target post.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Intra-thread context", |
| "sec_num": "3.2.1" |
| }, |
| { |
| "text": "BUT-FIT made the assumption that the stance of the target post depends only on itself, the source post, and the previous post in the thread.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Intra-thread context", |
| "sec_num": "3.2.1" |
| }, |
| { |
| "text": "Our system (Figure 1 ) utilizes the content from the SOURCE and PARENT tweets as additional context following previous work (Fajcik et al.,2019; Yang et al.,2019) which noted that the above two tweets mostly contain sufficient information to classify a TARGET tweet correctly. In this work, we leverage various inductive biases and propose late fusion in \u00a74.1, social media fine-tuning to better leverage BERT in \u00a74.2, discrimination between social media platforms in \u00a74.3, domain-specific features over generic textual features in \u00a74.4, and transition priors to better capture conversation dynamics in \u00a74.5. ", |
| "cite_spans": [ |
| { |
| "start": 124, |
| "end": 144, |
| "text": "(Fajcik et al.,2019;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 145, |
| "end": 162, |
| "text": "Yang et al.,2019)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 11, |
| "end": 20, |
| "text": "(Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "System Description", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We attend over the SOURCE and PARENT tweets separately followed by a late fusion of their [CLS] representations. This architecture allows for the TARGET tweets to independently attend over the SOURCE and the PARENT tweet, ensuring the capture of complementary information. This avoids the dilution of context that occurs through the combination of SOURCE and PARENT as context.", |
| "cite_spans": [ |
| { |
| "start": 90, |
| "end": 95, |
| "text": "[CLS]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Late Fusion", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "One pattern of errors exhibited by the previous models is due to their lack of ability to understand the 'social media' style of text. Common conversational constructs like sarcasm / rhetoric (usually intended to attack / refute someone as opposed to being a genuine question seeking more information) were wrongly labelled as QUERY due to the text containing symbols like \"?\" or interrogative words.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Understanding conversational constructs", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "To combat this, we use Conversational BERT 2 which is trained on social media and dialogue data. We further fine-tune this model on tweets from the RumourEval dataset and the larger PHEME dataset to incorporate additional background knowledge about rumourous tweets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Understanding conversational constructs", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Our rumours originate from two different social media domains -Twitter and Reddit. Though all prior work has trained models on a combination of data from both sources, we argue for domain separation owing to the fundamental differences in the type of content and interactions on these platforms.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Domain Separation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Twitter rumours are concerned with breaking news (Charlie Hebdo shooting, Ferguson unrest) while Reddit rumours are around long-standing conspiracy theories (Flat earth, benefits of Nicotine etc). Reddit discussion threads are shorter and converge sooner i.e. it takes fewer replies to collect the required information. But in case of Twitter, obtaining information that resolves a rumour is a more continuous and a longer process (Priya et al., 2019) .", |
| "cite_spans": [ |
| { |
| "start": 431, |
| "end": 451, |
| "text": "(Priya et al., 2019)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Domain Separation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Hence, we train separate models on the Twitter and Reddit data and later aggregate the results.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Domain Separation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Prior work has experimented with inclusion of lexical, sentiment, and emotional features but report little to no improvement (likely because BERT already captures these features). We instead run a TF-IDF vectorizer to extract most discriminatory features for each class and use a subset along with BERT. It also worth noting that these features varied between Twitter and Reddit, further corroborating our hypothesis for domain separation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Additional Features", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Upon deeper analysis of our training data, we observed that social media conversations tend to follow certain patterns -a QUERY stance is less likely to follow another QUERY stance (questions are usually followed by answers) while SUPPORT stance is highly likely to follow another SUPPORT stance (users espousing the same opinion). We incorporate this inductive bias via a post-processing module where we linearly interpolate the confidence scores from our model and the prior.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Incorporating a prior", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "We use the HuggingFace Transformers library 3 to fine-tune BERT base on the Sequence Classification task. We use the Adam optimizer (Kingma and Ba, 3 Tweet 1 -\"WERE YOU THERE THOUGH\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Tweet 2 -\"Your mind just can't fathom that can it?\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5" |
| }, |
| { |
| "text": "COMMENT The gold label is QUERY though these questions are rhetorical 4 Source -\"At least 10 killed in shooting\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Tweet 1 -\"11 Killed now\" in reply to Source Tweet 2 -\"11 Killed\" in reply to Source -Gold labels are different {Tweet 1: SUPPORT, Tweet 2: DENY} though the texts have the same meaning ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Our approach achieves an F 1 of 58.7, outperforming non-ensemble approaches by 2 F 1 as shown in Table 3 . Though ensembles from BUT-FIT and BLCU achieve a higher F 1 score, we do not ensemble our model owing to high computational cost for training and inference (for ex. BUT-FIT ensembles over 100 BERT large models). We report our best and average (over 5 random seeds) on the Ru-mourEval 2019 dev dataset. 58.7 Table 3 : Comparison with state of the art models Ensembles from BUT-FIT and BLCU produce scores that are higher than those presented here. We show results of comparable non-ensemble versions of state of the art models.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 97, |
| "end": 104, |
| "text": "Table 3", |
| "ref_id": null |
| }, |
| { |
| "start": 414, |
| "end": 421, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results and Error Analysis", |
| "sec_num": "6" |
| }, |
| { |
| "text": "In this section, we analyze individual components of our contribution and report incremental improvements in Table 4 Conversational Pre-Training allows the model to correctly interpret social media constructs like sarcasm, rhetoric (Table 2, Ex. 1) and yields a boost of 2.5 F 1 . TF-IDF features improve the score by 1.5 F 1 by biasing the model based on frequently used words/phrases for each stance.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 109, |
| "end": 116, |
| "text": "Table 4", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Ablation Study", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "Domain Separation and Late Fusion provide further gains, increasing the F 1 by 1.2. In addition to improving the score, domain separation is also essential for using TF-IDF features and prior as they are platform dependant.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ablation Study", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "Transition Priors increase the performance by 2.3 F 1 by guiding the prediction based on stance transition priors in cases where the model makes uncertain predictions (Table 2, Ex. 2).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ablation Study", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "The RumourEval dataset contains examples with noisy annotations (Table 2, Ex. 3) where the ground truth is mislabeled, thus penalizing our model for correct predictions. Additionally, few examples which have the same hierarchy and similar text (Table 2, Ex. 4) are assigned different labels (Possibly due to different interpretations among annotators) resulting in noisy training examples.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 64, |
| "end": 77, |
| "text": "(Table 2, Ex.", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Unsolvable Examples", |
| "sec_num": "7" |
| }, |
| { |
| "text": "Another class of unsolvable examples stemmed from deleted tweets. If a particular tweet was deleted, the dataset attaches its children to their GRANDPARENT tweet. This presents issues as the children express opinions towards a deleted tweet. A potential solution would be to remove tweets where the '@' mention is towards an unseen author but we would risk further reducing the small number of training examples in our dataset.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unsolvable Examples", |
| "sec_num": "7" |
| }, |
| { |
| "text": "In this work, we showcased the efficacy of inductive biases to the task of stance classification and achieved a score of 58.7 F 1 , surpassing existing approaches. We hope to utilize this model in other downstream tasks like veracity detection (Task B) and expand our inductive biases to other social media tasks such as fact verification and conversation derailment detection.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "8" |
| }, |
| { |
| "text": "https://github.com/sharanyarc96/ SocialMediaRumorStanceDetection", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "docs.deeppavlov.ai/en/master/ features/models/bert.html", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://github.com/huggingface/ transformers", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "CLEARumor at SemEval-2019 Task 7: Con-voLving ELMo Against Rumors", |
| "authors": [ |
| { |
| "first": "Ipek", |
| "middle": [], |
| "last": "Baris", |
| "suffix": "" |
| }, |
| { |
| "first": "Lukas", |
| "middle": [], |
| "last": "Schmelzeisen", |
| "suffix": "" |
| }, |
| { |
| "first": "Steffen", |
| "middle": [], |
| "last": "Staab", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1904.03084" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ipek Baris, Lukas Schmelzeisen, and Steffen Staab. 2019. CLEARumor at SemEval-2019 Task 7: Con- voLving ELMo Against Rumors. arXiv preprint arXiv:1904.03084.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Clues for Detecting Irony in User-Generated Contents: Oh...!! It's \"so Easy", |
| "authors": [ |
| { |
| "first": "Paula", |
| "middle": [], |
| "last": "Carvalho", |
| "suffix": "" |
| }, |
| { |
| "first": "Lu\u00eds", |
| "middle": [], |
| "last": "Sarmento", |
| "suffix": "" |
| }, |
| { |
| "first": "M\u00e1rio", |
| "middle": [ |
| "J" |
| ], |
| "last": "Silva", |
| "suffix": "" |
| }, |
| { |
| "first": "Eug\u00e9nio De", |
| "middle": [], |
| "last": "Oliveira", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "year = 2009, isbn = 9781605588056, publisher = Association for Computing Machinery, address =", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/1651461.1651471" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Paula Carvalho, Lu\u00eds Sarmento, M\u00e1rio J. Silva, and Eug\u00e9nio de Oliveira. Clues for Detecting Irony in User-Generated Contents: Oh...!! It's \"so Easy\";-), year = 2009, isbn = 9781605588056, publisher = Association for Computing Machin- ery, address = New York, NY, USA, url = https://doi.org/10.1145/1651461.1651471,.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "SemEval-2017 Task 8: RumourEval: Determining rumour veracity and support for rumours", |
| "authors": [ |
| { |
| "first": "Leon", |
| "middle": [], |
| "last": "Derczynski", |
| "suffix": "" |
| }, |
| { |
| "first": "Kalina", |
| "middle": [], |
| "last": "Bontcheva", |
| "suffix": "" |
| }, |
| { |
| "first": "Maria", |
| "middle": [], |
| "last": "Liakata", |
| "suffix": "" |
| }, |
| { |
| "first": "Rob", |
| "middle": [], |
| "last": "Procter", |
| "suffix": "" |
| }, |
| { |
| "first": "Geraldine", |
| "middle": [], |
| "last": "Wong Sak", |
| "suffix": "" |
| }, |
| { |
| "first": "Arkaitz", |
| "middle": [], |
| "last": "Hoi", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Zubiaga", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 11th International Workshop on Semantic Evaluation (SemEval-2017)", |
| "volume": "", |
| "issue": "", |
| "pages": "69--76", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Leon Derczynski, Kalina Bontcheva, Maria Liakata, Rob Procter, Geraldine Wong Sak Hoi, and Arkaitz Zubiaga. 2017. SemEval-2017 Task 8: RumourEval: Determining rumour veracity and support for ru- mours. In Proceedings of the 11th International Workshop on Semantic Evaluation (SemEval-2017), pages 69-76, Vancouver, Canada. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "NAACL-HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Un- derstanding. In NAACL-HLT.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "BUT-FIT at SemEval-2019 Task 7: Determining the Rumour Stance with Pre-Trained Deep Bidirectional Transformers", |
| "authors": [ |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Fajcik", |
| "suffix": "" |
| }, |
| { |
| "first": "Luk\u00e1\u0161", |
| "middle": [], |
| "last": "Burget", |
| "suffix": "" |
| }, |
| { |
| "first": "Pavel", |
| "middle": [], |
| "last": "Smrz", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1902.10126" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Martin Fajcik, Luk\u00e1\u0161 Burget, and Pavel Smrz. 2019. BUT-FIT at SemEval-2019 Task 7: Determining the Rumour Stance with Pre-Trained Deep Bidirectional Transformers. arXiv preprint arXiv:1902.10126.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "SemEval-2019 Task 7: RumourEval, Determining Rumour Veracity and Support for Rumours", |
| "authors": [ |
| { |
| "first": "Genevieve", |
| "middle": [], |
| "last": "Gorrell", |
| "suffix": "" |
| }, |
| { |
| "first": "Elena", |
| "middle": [], |
| "last": "Kochkina", |
| "suffix": "" |
| }, |
| { |
| "first": "Maria", |
| "middle": [], |
| "last": "Liakata", |
| "suffix": "" |
| }, |
| { |
| "first": "Ahmet", |
| "middle": [], |
| "last": "Aker", |
| "suffix": "" |
| }, |
| { |
| "first": "Arkaitz", |
| "middle": [], |
| "last": "Zubiaga", |
| "suffix": "" |
| }, |
| { |
| "first": "Kalina", |
| "middle": [], |
| "last": "Bontcheva", |
| "suffix": "" |
| }, |
| { |
| "first": "Leon", |
| "middle": [], |
| "last": "Derczynski", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 13th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "845--854", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Genevieve Gorrell, Elena Kochkina, Maria Liakata, Ahmet Aker, Arkaitz Zubiaga, Kalina Bontcheva, and Leon Derczynski. 2019. SemEval-2019 Task 7: RumourEval, Determining Rumour Veracity and Support for Rumours. In Proceedings of the 13th International Workshop on Semantic Evaluation, pages 845-854, Minneapolis, Minnesota, USA. As- sociation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Adam: A Method for Stochastic Optimization", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Diederik", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik P. Kingma and Jimmy Ba. 2014. Adam: A Method for Stochastic Optimization. CoRR, abs/1412.6980.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Turing at semeval-2017 task", |
| "authors": [ |
| { |
| "first": "Elena", |
| "middle": [], |
| "last": "Kochkina", |
| "suffix": "" |
| }, |
| { |
| "first": "Maria", |
| "middle": [], |
| "last": "Liakata", |
| "suffix": "" |
| }, |
| { |
| "first": "Isabelle", |
| "middle": [], |
| "last": "Augenstein", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Sequential approach to rumour stance classification with branch-lstm", |
| "volume": "8", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1704.07221" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Elena Kochkina, Maria Liakata, and Isabelle Augen- stein. 2017. Turing at semeval-2017 task 8: Sequen- tial approach to rumour stance classification with branch-lstm. arXiv preprint arXiv:1704.07221.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "even-tAI at SemEval-2019 Task 7: Rumor Detection on Social Media by Exploiting Content, User Credibility and Propagation Information", |
| "authors": [ |
| { |
| "first": "Quanzhi", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Qiong", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Luo", |
| "middle": [], |
| "last": "Si", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 13th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "855--859", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Quanzhi Li, Qiong Zhang, and Luo Si. 2019. even- tAI at SemEval-2019 Task 7: Rumor Detection on Social Media by Exploiting Content, User Credibil- ity and Propagation Information. In Proceedings of the 13th International Workshop on Semantic Evalu- ation, pages 855-859.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Twitter for Sparking a Movement", |
| "authors": [ |
| { |
| "first": "Lydia", |
| "middle": [], |
| "last": "Manikonda", |
| "suffix": "" |
| }, |
| { |
| "first": "Ghazaleh", |
| "middle": [], |
| "last": "Beigi", |
| "suffix": "" |
| }, |
| { |
| "first": "Huan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Subbarao", |
| "middle": [], |
| "last": "Kambhampati", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lydia Manikonda, Ghazaleh Beigi, Huan Liu, and Sub- barao Kambhampati. Twitter for Sparking a Move- ment, Reddit for Sharing the Moment: #metoo.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Deep contextualized word representations. ArXiv", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [ |
| "E" |
| ], |
| "last": "Peters", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Neumann", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Iyyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Gardner", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew E. Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word repre- sentations. ArXiv, abs/1802.05365.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Where should one get news updates: Twitter or reddit", |
| "authors": [ |
| { |
| "first": "Shalini", |
| "middle": [], |
| "last": "Priya", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Sequeira", |
| "suffix": "" |
| }, |
| { |
| "first": "Joydeep", |
| "middle": [], |
| "last": "Chandra", |
| "suffix": "" |
| }, |
| { |
| "first": "Sourav Kumar", |
| "middle": [], |
| "last": "Dandapat", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Online Social Networks and Media", |
| "volume": "9", |
| "issue": "", |
| "pages": "17--29", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shalini Priya, Ryan Sequeira, Joydeep Chandra, and Sourav Kumar Dandapat. 2019. Where should one get news updates: Twitter or reddit. Online Social Networks and Media, 9:17-29.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Improving Language Understanding by Generative Pre-Training", |
| "authors": [ |
| { |
| "first": "Alec", |
| "middle": [], |
| "last": "Radford", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alec Radford. 2018. Improving Language Understand- ing by Generative Pre-Training.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "BLCU NLP at SemEval-2019 Task 7: An Inference Chain-based GPT Model for Rumour Evaluation", |
| "authors": [ |
| { |
| "first": "Ruoyao", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Wanying", |
| "middle": [], |
| "last": "Xie", |
| "suffix": "" |
| }, |
| { |
| "first": "Chunhua", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Dong", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 13th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "1090--1096", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ruoyao Yang, Wanying Xie, Chunhua Liu, and Dong Yu. 2019. BLCU NLP at SemEval-2019 Task 7: An Inference Chain-based GPT Model for Rumour Evaluation. In Proceedings of the 13th Interna- tional Workshop on Semantic Evaluation, pages 1090-1096.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Detection and Resolution of Rumours in Social Media: A Survey", |
| "authors": [ |
| { |
| "first": "Arkaitz", |
| "middle": [], |
| "last": "Zubiaga", |
| "suffix": "" |
| }, |
| { |
| "first": "Ahmet", |
| "middle": [], |
| "last": "Aker", |
| "suffix": "" |
| }, |
| { |
| "first": "Kalina", |
| "middle": [], |
| "last": "Bontcheva", |
| "suffix": "" |
| }, |
| { |
| "first": "Maria", |
| "middle": [], |
| "last": "Liakata", |
| "suffix": "" |
| }, |
| { |
| "first": "Rob", |
| "middle": [], |
| "last": "Procter", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "ACM Comput. Surv", |
| "volume": "51", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Arkaitz Zubiaga, Ahmet Aker, Kalina Bontcheva, Maria Liakata, and Rob Procter. 2018. Detection and Resolution of Rumours in Social Media: A Sur- vey. ACM Comput. Surv., 51:32:1-32:36.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "Model Architecture showcasing Late Fusion of SOURCE + TARGET and PARENT + TARGET representations along with additional features. BERT Input is of the form -[CLS] CONTEXT [SEP] TARGET. SOURCE is the post initiating the conversation. The TARGET post is made in response to the PARENT." |
| }, |
| "TABREF0": { |
| "html": null, |
| "text": "Related Work", |
| "num": null, |
| "content": "<table/>", |
| "type_str": "table" |
| }, |
| "TABREF2": { |
| "html": null, |
| "text": "", |
| "num": null, |
| "content": "<table><tr><td>: Qualitative examples from our model</td></tr><tr><td>2014), with a learning rate of 1.5e-6 and batch size</td></tr><tr><td>of 32 and train on an NVIDIA Tesla T4 GPU.</td></tr></table>", |
| "type_str": "table" |
| }, |
| "TABREF3": { |
| "html": null, |
| "text": "", |
| "num": null, |
| "content": "<table><tr><td/><td>shows some</td></tr><tr><td colspan=\"2\">qualitative examples from our model.</td></tr><tr><td>Model</td><td>Macro-F1</td></tr><tr><td colspan=\"2\">BUT-FIT BERT base (Average) 51.4</td></tr><tr><td>BranchLSTM</td><td>49.3</td></tr><tr><td colspan=\"2\">BUT-FIT BERT large (Average) 56.2</td></tr><tr><td>BLCU (Best Reported)</td><td>56.6</td></tr><tr><td>Ours (Average)</td><td>56.7</td></tr><tr><td>Ours (Best)</td><td/></tr></table>", |
| "type_str": "table" |
| }, |
| "TABREF5": { |
| "html": null, |
| "text": "Effect of each of our inductive biases", |
| "num": null, |
| "content": "<table/>", |
| "type_str": "table" |
| } |
| } |
| } |
| } |