| { |
| "paper_id": "U19-1009", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T03:07:52.531765Z" |
| }, |
| "title": "Predicting Political Frames Across Policy Issues and Contexts", |
| "authors": [ |
| { |
| "first": "Shima", |
| "middle": [], |
| "last": "Khanehzar", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The University of Melbourne", |
| "location": {} |
| }, |
| "email": "skhanehzar@student.unimelb.edu.au" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Turpin", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The University of Melbourne", |
| "location": {} |
| }, |
| "email": "aturpin@unimelb.edu.au" |
| }, |
| { |
| "first": "Gosia", |
| "middle": [], |
| "last": "Mikolajczak", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The University of Melbourne", |
| "location": {} |
| }, |
| "email": "malgorzata.mikolajczak@unimelb.edu.au" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Politically-contested issues are often discussed with different emphases by different people. This emphasis is called a frame. In this paper, we examine the performance of classifiers trained using the media frames Corpus (MFC) (Card et al., 2015); a collection of US news labelled with fifteen different frame categories. Specifically, we compare pre-trained language models (XLNet, Bert, and Roberta), fine-tuned using MFC, against results from the literature and simpler models in their ability to predict frames from text. We also test these models on a new corpus that we have derived from Australian parliamentary speeches. Our experimental results first show that the fine-tuned models significantly outperform the current best methods on MFC. We also show that the model fine-tuned on US news articles can be convincingly applied to predict policy frames in Australian parliamentary speeches, though the accuracy is significantly reduced, suggesting potential discrepancy in framing strategies and/or text usage between US News and Australian Parliamentary Speeches.", |
| "pdf_parse": { |
| "paper_id": "U19-1009", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Politically-contested issues are often discussed with different emphases by different people. This emphasis is called a frame. In this paper, we examine the performance of classifiers trained using the media frames Corpus (MFC) (Card et al., 2015); a collection of US news labelled with fifteen different frame categories. Specifically, we compare pre-trained language models (XLNet, Bert, and Roberta), fine-tuned using MFC, against results from the literature and simpler models in their ability to predict frames from text. We also test these models on a new corpus that we have derived from Australian parliamentary speeches. Our experimental results first show that the fine-tuned models significantly outperform the current best methods on MFC. We also show that the model fine-tuned on US news articles can be convincingly applied to predict policy frames in Australian parliamentary speeches, though the accuracy is significantly reduced, suggesting potential discrepancy in framing strategies and/or text usage between US News and Australian Parliamentary Speeches.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Politicians and the media often portray political issues in a subjective way in an attempt to shape public attitudes (Chong and Druckman, 2007) . For example, a politician opposing the same-sex marriage (SSM) might frame the issue using the lens of tradition and religious beliefs, whereas a politician supporting SSM might frame a speech using fairness and equality as the base. Due to its complexity and linguistic subtleties, issue framing (Entman, 1993) remains challenging for automated text methods. To address these challenges, recent work by Boydstun et al. (2013) defines broad categories of common policy frames and annotates US News articles to build the media frames Corpus (MFC) (Card et al., 2015) . Follow-up studies have used the MFC to investigate the accuracy of models that attempt to classify the dominant frames of US news articles. In this paper, we aim to extend this work and answer the following question: can recent pre-trained neural classifiers learn to predict dominant frames across issues and communication contexts? To answer this question, we provide the following contributions.", |
| "cite_spans": [ |
| { |
| "start": 117, |
| "end": 143, |
| "text": "(Chong and Druckman, 2007)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 443, |
| "end": 457, |
| "text": "(Entman, 1993)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 692, |
| "end": 711, |
| "text": "(Card et al., 2015)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We investigate the effectiveness of the pretrained language models XLNet, Bert and Roberta in predicting dominant frames within each issue on the MFC.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We investigate whether our models can learn to predict frame categories across issues. Our results show that we can apply trained models on a new issue without training data for that particular issue.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We annotate a small subset of Australian parliamentary speeches on Same-Sex Marriage (SSM).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We evaluate whether our models can learn to predict frames across communication contexts, applying the models fine-tuned on the MFC dataset on the Australian parliamentary speeches.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Natural Language Processing techniques have been applied to identify several aspects of the political discourse including ideology (Iyyer et al., 2014) , sentiment (Godbole et al., 2007; Balahur et al., 2010) , and stance (Mohammad et al., 2016) . Earlier studies focusing specifically on frame detection usually 1 employ topic modeling (Boydstun et al., 2013) , (Nguyen, 2015) , (Tsur et al., 2015 ). This approach allows for automated detection of frames within specific corpora, but does not easily allow results and methods to be used across issues or contexts that are not part of the corpus on which the model is built. To address this shortcoming, Boydstun et al. 2013 proposed a list of 15 broad frames (e.g., Economic, Morality, or Legal; plus an \"Other\" category) commonly used when discussing different policy issues (such as abortion, immigration, foreign aid, etc.), and in different communication contexts (news stories, Twitter, party manifestos, legislative debates, etc.). The frames have been defined in the Policy Frame Codebook (PFC)", |
| "cite_spans": [ |
| { |
| "start": 131, |
| "end": 151, |
| "text": "(Iyyer et al., 2014)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 164, |
| "end": 186, |
| "text": "(Godbole et al., 2007;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 187, |
| "end": 208, |
| "text": "Balahur et al., 2010)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 222, |
| "end": 245, |
| "text": "(Mohammad et al., 2016)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 337, |
| "end": 360, |
| "text": "(Boydstun et al., 2013)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 363, |
| "end": 377, |
| "text": "(Nguyen, 2015)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 380, |
| "end": 398, |
| "text": "(Tsur et al., 2015", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background and Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The Media Frames Corpus (MFC) (Card et al., 2015) includes news articles from 13 U.S. newspapers, covering five policy issues: same-sex marriage, immigration, tobacco, gun control, and the death penalty, published between 1980-2012. Approximately 12,000 articles have been annotated with the dominant frame from the list of categories proposed in PFC. The annotations also identify exact text spans associated with each of the 15 frames. Since the frame distribution is imbalanced and not reported in the original paper, here we show the statistical distribution of the frames in Table 1.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background and Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The MFC has been previously used for training and testing classification models. For example, Card et al. (2016) provide an unsupervised model that clusters articles with similar collections of \"personas\" (i.e., characterisations of entities) and demonstrate that these personas can help predict the coarse-grained framing annotations in the MFC.", |
| "cite_spans": [ |
| { |
| "start": 94, |
| "end": 112, |
| "text": "Card et al. (2016)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background and Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The current best result for predicting the dominant frame of each article in the MFC comes from Ji and Smith (2017), who proposed a recursive neural discourse structure network with a new attention mechanism of the text for text categorization. They report the average accuracy across 10fold cross-validation using the immigration issue which we report in Table 2 (column 4). Field et al. (2018) used the MFC to investigate agenda-setting and framing in Russian News. They introduced embedding-based methods for projecting frames of one language into another (i.e., English to Russian). It is worth mentioning that their approach is applicable to languages suffering from lack of training data.", |
| "cite_spans": [ |
| { |
| "start": 376, |
| "end": 395, |
| "text": "Field et al. (2018)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 356, |
| "end": 363, |
| "text": "Table 2", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Background and Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In this paper, we explore three general approaches to classify text with the frames from the PFC. First, we create baseline models with Support Vector Machine (SVM) and Weighted Support Vector Machine (Weighted-SVM). SVMs are often used for text classification problems, as the algorithms perform classification by finding hyperplanes to differentiate the classes. Weighted-SVM is often used for datasets with skewed distribution to reduce bias, and it is more suitable for MFC, which has an imbalanced class distribution. We implement SVM and Weighted-SVM using the default parameters in the sklearn python library. Second, we use the MFC to form a lexicon (bag of words) for each frame and classify new texts using the Okapi text similarity metrics (Robertson and Zaragoza, 2009) from each lexicon. Last, we employ pre-trained language models, and finetune them with the MFC. Since our primary goal is to investigate if framing shares similar patterns across domains, we evaluate these models across issues and contexts. For across-issue evaluation, we fine-tune our models on four issues from the MFC (i.e., excluding immigration), and then evaluate them on the immigration subset. For across-context evaluation, we evaluate the models on a subset of the Australian Parliamentary Speeches (APS), which we describe in more detail below.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Based on the approach by Field et al. (2018) , a lexicon related to each frame f in the PFC is derived by taking the top 50 words with the highest pointwise mutual information I(f, w) = log p(w|f ) \u2212 log p(w), where w is a word. We compute P (w|f ) by taking the number of occurrences of w in all the text segments annotated with the secondary frame f in the MFC divided by the total number of words in those segments. Quantity P (w) is computed similarly over the entire corpus. As in Field et al. (2018) , we discard all words that occur in fewer than 0.5% of documents or in more than 98% of documents.", |
| "cite_spans": [ |
| { |
| "start": 25, |
| "end": 44, |
| "text": "Field et al. (2018)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 486, |
| "end": 505, |
| "text": "Field et al. (2018)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Framing Lexicons", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "In order to classify a document into one of the 15 frames, we take the highest ranked lexicon using the document as a query against a collection of the 15 lexicons, measuring similarity using Okapi scoring (Robertson and Zaragoza, 2009) . We use the default parameters in the Okapi formula as implemented in the Gensim Python Library. ", |
| "cite_spans": [ |
| { |
| "start": 206, |
| "end": 236, |
| "text": "(Robertson and Zaragoza, 2009)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Framing Lexicons", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Bert (Devlin et al., 2019 ) is a bi-directional language model based on now ubiquitous Transformers (Vaswani et al., 2017) with a Cloze Test objective, and trained on a large text corpus. The pretrained Bert model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications. In this work, we add an extra task-specific neural layer followed by a non-linear layer and softmax for text classification on top of Bert. Then, the extra layers are jointly fine-tuned with the pretrained Bert. A prominent limitation of Bert is that it takes at most 512 word tokens, which is often too small for document level tasks.", |
| "cite_spans": [ |
| { |
| "start": 5, |
| "end": 25, |
| "text": "(Devlin et al., 2019", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 100, |
| "end": 122, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Neural models", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "XLNet ( Roberta (Liu et al., 2019) is an improved version of Bert trained on a larger dataset with longer sequences. It also modifies the original design of Bert by removing the next sentence prediction objective and dynamically changing the masking pattern during pre-training. The author of Roberta claims that Roberta is comparable with XLNet on all GLUE (Wang et al., 2019) tasks and SQUAD (Rajpurkar et al., 2016) , and achieves the state-ofthe-art performance on 4/9 of the GLUE tasks.", |
| "cite_spans": [ |
| { |
| "start": 16, |
| "end": 34, |
| "text": "(Liu et al., 2019)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 358, |
| "end": 377, |
| "text": "(Wang et al., 2019)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 394, |
| "end": 418, |
| "text": "(Rajpurkar et al., 2016)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Neural models", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The Australian Parliamentary Speeches (APS) dataset includes transcripts of second reading speeches related to same-sex marriage (SSM) bills presented in the House of Representatives of the Australian Parliament between 2004-2017. The data has been obtained from the Federal Parliament website. A random sample of 100 speeches was given to an honours student in political science, who was asked to identify 15 frame categories from the PFC, and to indicate the relevant passages representing each frame. The rater was also asked to indicate the dominant frame of each speech. We report the APS frame statistics in Table 1. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The APS Dataset", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "We divide our experiments into four parts: Same-Issue and Same-Context (SISC); Across-Issue and Same-Context (AISC); Same-Issue and Across-Context (SIAC); Across-Issue and Across-Context. We follow the same setup as in Card et al. (2016) and report average accuracy across 10-fold cross validation. We use the Bert-Base-Cased, Roberta-Base, Xlnet-Base-Cased models. We use the pre-trained model from Huggingface package. We set the maximum sequence length to 256 since the average number of tokens for SSM and IM are 253 and 254 respectively. For more details about the pre-trained models' parameters, we refer to the Huggingface package.", |
| "cite_spans": [ |
| { |
| "start": 219, |
| "end": 237, |
| "text": "Card et al. (2016)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments and Discussion", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Same-Issue and Same-Context (SISC) We fine-tune and evaluate our models on the Same-Sex Marriage (SSM) and Immigration (IM) issues from the MFC dataset, and compare the results for IM with the previously proposed models, since to the best of our knowledge, IM is the only issue with results reported in previous work. Table 2 columns 2 and 4 show that the neural models outperform the basic classifier and lexicon-based methods. A paired t-test between Roberta-Base and Framing Lexicons method confirms the difference is statistically significant (p < 0.001). The difference between Roberta-Base and Xlnet-Base-Case is not statistically significant (p = 0.061), while the difference between Roberta-Base and Bert-Base-Case is (p = 0.008).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 318, |
| "end": 326, |
| "text": "Table 2", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments and Discussion", |
| "sec_num": "4" |
| }, |
| { |
| "text": "To examine if our models can learn to predict frames across issues, we first exclude the SSM and IM data, respectively, from the MFC dataset and finetune our models on the data for the remaining issues. Then, we evaluate the models on the SSM and IM data and compare our results with the previously proposed models. Columns 3 and 5 of Table 2 show that there is a decrease from SISC in mean accuracy of about 4% for SSM, and 9% for IM. However, the classifiers are still well above chance, which is about 27.9% for SSM and 16.3% for IM if we default to the most common frame in the respective issues.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 335, |
| "end": 342, |
| "text": "Table 2", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Across-Issue and Same-Context (AISC)", |
| "sec_num": null |
| }, |
| { |
| "text": "To examine if our models can learn to predict frames across communication context, we fine-tune our models on the SSM data from the MFC, and then evaluate our models on the APS dataset. Table 3 (column 3) shows that there is a further drop in mean accuracy here for all models, but again still above chance, which is about 35.0% for APS if we default to the most common frame in the respective issues.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 186, |
| "end": 193, |
| "text": "Table 3", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Same-Issue and Across-Context (SIAC)", |
| "sec_num": null |
| }, |
| { |
| "text": "Across-Issue and Across-Context (AIAC) To examine if our models can still learn to predict frames across both issue and communication context, we fine-tune our models on all other MFC data excluding SSM data, and then evaluate our models on APS dataset. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Same-Issue and Across-Context (SIAC)", |
| "sec_num": null |
| }, |
| { |
| "text": "The previous best mean accuracy for predicting the dominant frame on the Immigration subset of the MFC is 58.4%. Our best model (Roberta-Base) fine-tuned with data on the same issue improves the performance by 12.7%, and our best model (Roberta-large-not shown in Table ? ?) fine-tuned on data not including the Immigration subset has 56.26% accuracy; still comparable performance against previous methods. Our best model outperforms the previous best models on the MFC by a large margin. Notably, the performance of pre-trained language models is comparable to the previous best models, even with only fine-tuning on data not specific to the issue being classified, proving that pretrained neural classifiers can learn to predict dominant frames across domains. However, fine-tuning on small amount of domain-specific data still outperforms the same models fine-tuned on out-ofdomain datasets.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 264, |
| "end": 271, |
| "text": "Table ?", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Using a pre-trained Roberta (Liu et al., 2019) model with added issue-and context-specific data to predict the dominant frame of a text improves upon the current state-of-the-art. Such a model that is trained on U.S. media articles can be convincingly applied to predict frames in Australian political speeches, though the accuracy is significantly reduced, suggesting potential discrepancy in framing strategy between US News and Australian Parliamentary Speeches, and/or different uses of language in the two contexts. Over the coming months, we will work on improving the size and quality of the APS data and examine ways to improve the prediction of dominant frames in Australian political text.", |
| "cite_spans": [ |
| { |
| "start": 28, |
| "end": 46, |
| "text": "(Liu et al., 2019)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "for an exception, seeBaumer et al. (2015) who use classifiers to identify the language of framing in the news", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Sentiment analysis in the news", |
| "authors": [ |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Balahur", |
| "suffix": "" |
| }, |
| { |
| "first": "Ralf", |
| "middle": [], |
| "last": "Steinberger", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Mijail", |
| "suffix": "" |
| }, |
| { |
| "first": "Vanni", |
| "middle": [], |
| "last": "Kabadjov", |
| "suffix": "" |
| }, |
| { |
| "first": "Erik", |
| "middle": [], |
| "last": "Zavarella", |
| "suffix": "" |
| }, |
| { |
| "first": "Matina", |
| "middle": [], |
| "last": "Van Der Goot", |
| "suffix": "" |
| }, |
| { |
| "first": "Bruno", |
| "middle": [], |
| "last": "Halkia", |
| "suffix": "" |
| }, |
| { |
| "first": "Jenya", |
| "middle": [], |
| "last": "Pouliquen", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Belyaeva", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the International Conference on Language Resources and Evaluation, LREC 2010", |
| "volume": "", |
| "issue": "", |
| "pages": "17--23", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexandra Balahur, Ralf Steinberger, Mijail A. Kabad- jov, Vanni Zavarella, Erik Van der Goot, Matina Halkia, Bruno Pouliquen, and Jenya Belyaeva. 2010. Sentiment analysis in the news. In Proceedings of the International Conference on Language Re- sources and Evaluation, LREC 2010, 17-23 May 2010, Valletta, Malta.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Testing and comparing computational approaches for identifying the language of framing in political news", |
| "authors": [ |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Baumer", |
| "suffix": "" |
| }, |
| { |
| "first": "Elisha", |
| "middle": [], |
| "last": "Elovic", |
| "suffix": "" |
| }, |
| { |
| "first": "Ying", |
| "middle": [], |
| "last": "Qin", |
| "suffix": "" |
| }, |
| { |
| "first": "Francesca", |
| "middle": [], |
| "last": "Polletta", |
| "suffix": "" |
| }, |
| { |
| "first": "Geri", |
| "middle": [], |
| "last": "Gay", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "The 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "1472--1482", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eric Baumer, Elisha Elovic, Ying Qin, Francesca Pol- letta, and Geri Gay. 2015. Testing and comparing computational approaches for identifying the lan- guage of framing in political news. In NAACL HLT 2015, The 2015 Conference of the North American Chapter of the Association for Computational Lin- guistics: Human Language Technologies, Denver, Colorado, USA, May 31 -June 5, 2015, pages 1472- 1482.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Identifying media frames and frame dynamics within and across policy issues", |
| "authors": [ |
| { |
| "first": "Justin", |
| "middle": [ |
| "H" |
| ], |
| "last": "Amber E Boydstun", |
| "suffix": "" |
| }, |
| { |
| "first": "Philip", |
| "middle": [], |
| "last": "Gross", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah A", |
| "middle": [], |
| "last": "Resnik", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Amber E Boydstun, Justin H Gross, Philip Resnik, and Noah A Smith. 2013. Identifying media frames and frame dynamics within and across policy issues.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "The media frames corpus: Annotations of frames across issues", |
| "authors": [ |
| { |
| "first": "Dallas", |
| "middle": [], |
| "last": "Card", |
| "suffix": "" |
| }, |
| { |
| "first": "Amber", |
| "middle": [ |
| "E" |
| ], |
| "last": "Boydstun", |
| "suffix": "" |
| }, |
| { |
| "first": "Justin", |
| "middle": [ |
| "H" |
| ], |
| "last": "Gross", |
| "suffix": "" |
| }, |
| { |
| "first": "Philip", |
| "middle": [], |
| "last": "Resnik", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah A", |
| "middle": [], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
| "volume": "2", |
| "issue": "", |
| "pages": "438--444", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dallas Card, Amber E Boydstun, Justin H Gross, Philip Resnik, and Noah A Smith. 2015. The media frames corpus: Annotations of frames across issues. In Pro- ceedings of the 53rd Annual Meeting of the Associ- ation for Computational Linguistics and the 7th In- ternational Joint Conference on Natural Language Processing (Volume 2: Short Papers), pages 438- 444.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Analyzing framing through the casts of characters in the news", |
| "authors": [ |
| { |
| "first": "Dallas", |
| "middle": [], |
| "last": "Card", |
| "suffix": "" |
| }, |
| { |
| "first": "Justin", |
| "middle": [ |
| "H" |
| ], |
| "last": "Gross", |
| "suffix": "" |
| }, |
| { |
| "first": "Amber", |
| "middle": [ |
| "E" |
| ], |
| "last": "Boydstun", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1410--1420", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dallas Card, Justin H. Gross, Amber E. Boydstun, and Noah A. Smith. 2016. Analyzing framing through the casts of characters in the news. In Proceedings of the 2016 Conference on Empirical Methods in Nat- ural Language Processing, EMNLP 2016, Austin, Texas, USA, November 1-4, 2016, pages 1410-1420.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Framing public opinion in competitive democracies", |
| "authors": [ |
| { |
| "first": "Dennis", |
| "middle": [], |
| "last": "Chong", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "James N Druckman", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "American Political Science Review", |
| "volume": "101", |
| "issue": "4", |
| "pages": "637--655", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dennis Chong and James N Druckman. 2007. Framing public opinion in competitive democracies. Ameri- can Political Science Review, 101(4):637-655.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Transformer-xl: Attentive language models beyond a fixed-length context", |
| "authors": [ |
| { |
| "first": "Zihang", |
| "middle": [], |
| "last": "Dai", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhilin", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yiming", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaime", |
| "middle": [ |
| "G" |
| ], |
| "last": "Carbonell", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [], |
| "last": "Viet Le", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruslan", |
| "middle": [], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Conference of the Association for Computational Linguistics, ACL 2019", |
| "volume": "1", |
| "issue": "", |
| "pages": "2978--2988", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zihang Dai, Zhilin Yang, Yiming Yang, Jaime G. Car- bonell, Quoc Viet Le, and Ruslan Salakhutdinov. 2019. Transformer-xl: Attentive language models beyond a fixed-length context. In Proceedings of the 57th Conference of the Association for Compu- tational Linguistics, ACL 2019, Florence, Italy, July 28-August 2, 2019, Volume 1: Long Papers, pages 2978-2988.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1423" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Framing: Toward clarification of a fractured paradigm", |
| "authors": [ |
| { |
"first": "Robert",
"middle": [
"M"
],
"last": "Entman",
"suffix": ""
| } |
| ], |
| "year": 1993, |
| "venue": "Journal of communication", |
| "volume": "43", |
| "issue": "4", |
| "pages": "51--58", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Robert M Entman. 1993. Framing: Toward clarifica- tion of a fractured paradigm. Journal of communi- cation, 43(4):51-58.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
"title": "Framing and agenda-setting in Russian news: a computational analysis of intricate political strategies",
| "authors": [ |
| { |
| "first": "Anjalie", |
| "middle": [], |
| "last": "Field", |
| "suffix": "" |
| }, |
| { |
| "first": "Doron", |
| "middle": [], |
| "last": "Kliger", |
| "suffix": "" |
| }, |
| { |
| "first": "Shuly", |
| "middle": [], |
| "last": "Wintner", |
| "suffix": "" |
| }, |
| { |
| "first": "Jennifer", |
| "middle": [], |
| "last": "Pan", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Yulia", |
| "middle": [], |
| "last": "Tsvetkov", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "3570--3580", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anjalie Field, Doron Kliger, Shuly Wintner, Jennifer Pan, Dan Jurafsky, and Yulia Tsvetkov. 2018. Fram- ing and agenda-setting in russian news: a compu- tational analysis of intricate political strategies. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, Brussels, Belgium, October 31 -November 4, 2018, pages 3570-3580.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Large-scale sentiment analysis for news and blogs", |
| "authors": [ |
| { |
| "first": "Namrata", |
| "middle": [], |
| "last": "Godbole", |
| "suffix": "" |
| }, |
| { |
| "first": "Manja", |
| "middle": [], |
| "last": "Srinivasaiah", |
| "suffix": "" |
| }, |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Skiena", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the First International Conference on Weblogs and Social Media, ICWSM", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Namrata Godbole, Manja Srinivasaiah, and Steven Skiena. 2007. Large-scale sentiment analysis for news and blogs. In Proceedings of the First Inter- national Conference on Weblogs and Social Media, ICWSM 2007, Boulder, Colorado, USA, March 26- 28, 2007.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Political ideology detection using recursive neural networks", |
| "authors": [ |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Iyyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Enns", |
| "suffix": "" |
| }, |
| { |
| "first": "Jordan", |
| "middle": [], |
| "last": "Boyd-Graber", |
| "suffix": "" |
| }, |
| { |
| "first": "Philip", |
| "middle": [], |
| "last": "Resnik", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1113--1122", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/v1/P14-1105" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mohit Iyyer, Peter Enns, Jordan Boyd-Graber, and Philip Resnik. 2014. Political ideology detection us- ing recursive neural networks. In Proceedings of the 52nd Annual Meeting of the Association for Compu- tational Linguistics (Volume 1: Long Papers), pages 1113-1122, Baltimore, Maryland. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Neural discourse structure for text categorization", |
| "authors": [ |
| { |
| "first": "Yangfeng", |
| "middle": [], |
| "last": "Ji", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "996--1005", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P17-1092" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yangfeng Ji and Noah A. Smith. 2017. Neural dis- course structure for text categorization. In Proceed- ings of the 55th Annual Meeting of the Association for Computational Linguistics, ACL 2017, Vancou- ver, Canada, July 30 -August 4, Volume 1: Long Papers, pages 996-1005.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
"title": "RoBERTa: A robustly optimized BERT pretraining approach",
| "authors": [ |
| { |
| "first": "Yinhan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingfei", |
| "middle": [], |
| "last": "Du", |
| "suffix": "" |
| }, |
| { |
| "first": "Mandar", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1907.11692" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining ap- proach. arXiv preprint arXiv:1907.11692.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "SemEval-2016 task 6: Detecting stance in tweets", |
| "authors": [ |
| { |
| "first": "Saif", |
| "middle": [], |
| "last": "Mohammad", |
| "suffix": "" |
| }, |
| { |
| "first": "Svetlana", |
| "middle": [], |
| "last": "Kiritchenko", |
| "suffix": "" |
| }, |
| { |
| "first": "Parinaz", |
| "middle": [], |
| "last": "Sobhani", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodan", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Colin", |
| "middle": [], |
| "last": "Cherry", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 10th International Workshop on Semantic Evaluation (SemEval-2016)", |
| "volume": "", |
| "issue": "", |
| "pages": "31--41", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/S16-1003" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Saif Mohammad, Svetlana Kiritchenko, Parinaz Sob- hani, Xiaodan Zhu, and Colin Cherry. 2016. SemEval-2016 task 6: Detecting stance in tweets. In Proceedings of the 10th International Workshop on Semantic Evaluation (SemEval-2016), pages 31- 41, San Diego, California. Association for Compu- tational Linguistics.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Guided Probabilistic Topic Models for Agenda-Setting and Framing", |
| "authors": [ |
| { |
"first": "Viet",
"middle": [
"An"
],
"last": "Nguyen",
"suffix": ""
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Viet An Nguyen. 2015. Guided Probabilistic Topic Models for Agenda-Setting and Framing. Ph.D. the- sis.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
"title": "SQuAD: 100,000+ questions for machine comprehension of text",
| "authors": [ |
| { |
| "first": "Pranav", |
| "middle": [], |
| "last": "Rajpurkar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jian", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Konstantin", |
| "middle": [], |
| "last": "Lopyrev", |
| "suffix": "" |
| }, |
| { |
| "first": "Percy", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2383--2392", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. 2016. Squad: 100, 000+ questions for machine comprehension of text. In Proceedings of the 2016 Conference on Empirical Methods in Nat- ural Language Processing, EMNLP 2016, Austin, Texas, USA, November 1-4, 2016, pages 2383-2392.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "The probabilistic relevance framework: BM25 and beyond", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Stephen", |
| "suffix": "" |
| }, |
| { |
| "first": "Hugo", |
| "middle": [], |
| "last": "Robertson", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Zaragoza", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Foundations and Trends in Information Retrieval", |
| "volume": "3", |
| "issue": "4", |
| "pages": "333--389", |
| "other_ids": { |
| "DOI": [ |
| "10.1561/1500000019" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stephen E. Robertson and Hugo Zaragoza. 2009. The probabilistic relevance framework: BM25 and be- yond. Foundations and Trends in Information Re- trieval, 3(4):333-389.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "A frame of mind: Using statistical models for detection of framing and agenda setting campaigns", |
| "authors": [ |
| { |
| "first": "Oren", |
| "middle": [], |
| "last": "Tsur", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Calacci", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Lazer", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing of the Asian Federation of Natural Language Processing", |
| "volume": "1", |
| "issue": "", |
| "pages": "1629--1638", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oren Tsur, Dan Calacci, and David Lazer. 2015. A frame of mind: Using statistical models for detection of framing and agenda setting campaigns. In Pro- ceedings of the 53rd Annual Meeting of the Associ- ation for Computational Linguistics and the 7th In- ternational Joint Conference on Natural Language Processing of the Asian Federation of Natural Lan- guage Processing, ACL 2015, July 26-31, 2015, Bei- jing, China, Volume 1: Long Papers, pages 1629- 1638.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "Lukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "5998--6008", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Pro- cessing Systems 30: Annual Conference on Neural Information Processing Systems 2017, 4-9 Decem- ber 2017, Long Beach, CA, USA, pages 5998-6008.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "GLUE: A multi-task benchmark and analysis platform for natural language understanding", |
| "authors": [ |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Amanpreet", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "Julian", |
| "middle": [], |
| "last": "Michael", |
| "suffix": "" |
| }, |
| { |
| "first": "Felix", |
| "middle": [], |
| "last": "Hill", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel", |
| "middle": [ |
| "R" |
| ], |
| "last": "Bowman", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "7th International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R. Bowman. 2019. GLUE: A multi-task benchmark and analysis plat- form for natural language understanding. In 7th International Conference on Learning Representa- tions, ICLR 2019, New Orleans, LA, USA, May 6-9, 2019.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
"title": "XLNet: Generalized autoregressive pretraining for language understanding",
| "authors": [ |
| { |
| "first": "Zhilin", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zihang", |
| "middle": [], |
| "last": "Dai", |
| "suffix": "" |
| }, |
| { |
| "first": "Yiming", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaime", |
| "middle": [ |
| "G" |
| ], |
| "last": "Carbonell", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruslan", |
| "middle": [], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Quoc", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhilin Yang, Zihang Dai, Yiming Yang, Jaime G. Carbonell, Ruslan Salakhutdinov, and Quoc V. Le. 2019. Xlnet: Generalized autoregressive pretraining for language understanding. CoRR, abs/1906.08237.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF1": { |
| "type_str": "table", |
| "num": null, |
| "html": null, |
| "content": "<table/>", |
| "text": "" |
| }, |
| "TABREF4": { |
| "type_str": "table", |
| "num": null, |
| "html": null, |
| "content": "<table><tr><td>: Mean accuracy of Same-Issue and Same-Context (SISC); Across-Issue and Same-Context (AISC) evalu-</td></tr><tr><td>ated on both the Same-Sex Marriage (SSM) and Immigration (IM). The training and testing data are indicated in</td></tr><tr><td>the heading of each column.</td></tr></table>", |
| "text": "" |
| }, |
| "TABREF5": { |
| "type_str": "table", |
| "num": null, |
| "html": null, |
| "content": "<table><tr><td colspan=\"3\">Training data MFC no SSM MFC SSM</td></tr><tr><td>Testing data</td><td>APS</td><td>APS</td></tr><tr><td/><td>(AIAC)</td><td>(SIAC)</td></tr><tr><td>Roberta-Base</td><td>41.0</td><td>43.0</td></tr><tr><td>Xlnet-Base-Case</td><td>43.0</td><td>46.0</td></tr><tr><td>Bert-Base-Case</td><td>40.0</td><td>47.0</td></tr><tr><td>SVM</td><td>32.0</td><td>35.0</td></tr><tr><td>Weighted-SVM</td><td>33.0</td><td>37.0</td></tr><tr><td>Framing Lexicons</td><td>34.0</td><td>38.0</td></tr></table>", |
| "text": "(column 2) shows that there is a further drop in mean accuracy here, about 9.3% on average for all models, compared to SIAC, but again still above chance, which is about 35.0% for APS if we default to the most common frame in the the respective issues." |
| }, |
| "TABREF6": { |
| "type_str": "table", |
| "num": null, |
| "html": null, |
| "content": "<table><tr><td>: Mean accuracy of Same-Issue and Across-</td></tr><tr><td>Context (SIAC); Across-Issue and Across-Context</td></tr><tr><td>(AIAC) evaluated on both the APS dataset. The train-</td></tr><tr><td>ing and testing data are indicated in the heading of each</td></tr><tr><td>column.</td></tr></table>", |
| "text": "" |
| } |
| } |
| } |
| } |