| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T14:49:18.132808Z" |
| }, |
| "title": "Viable Threat on News Reading: Generating Biased News Using Natural Language Models", |
| "authors": [ |
| { |
| "first": "Saurabh", |
| "middle": [], |
| "last": "Gupta", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Indraprastha Institute of Information Technology -Delhi", |
| "location": { |
| "settlement": "Delhi", |
| "country": "India" |
| } |
| }, |
| "email": "saurabhg@iiitd.ac.in" |
| }, |
| { |
| "first": "Huy", |
| "middle": [ |
| "H" |
| ], |
| "last": "Nguyen", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National Institute of Informatics", |
| "location": { |
| "settlement": "Tokyo", |
| "country": "Japan" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Junichi", |
| "middle": [], |
| "last": "Yamagishi", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National Institute of Informatics", |
| "location": { |
| "settlement": "Tokyo", |
| "country": "Japan" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Isao", |
| "middle": [], |
| "last": "Echizen", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National Institute of Informatics", |
| "location": { |
| "settlement": "Tokyo", |
| "country": "Japan" |
| } |
| }, |
| "email": "iechizen@nii.ac.jp" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Recent advancements in natural language generation has raised serious concerns. Highperformance language models are widely used for language generation tasks because they are able to produce fluent and meaningful sentences. These models are already being used to create fake news. They can also be exploited to generate biased news, which can then be used to attack news aggregators to change their reader's behavior and influence their bias. In this paper, we use a threat model to demonstrate that the publicly available language models can reliably generate biased news content based on an input original news. We also show that a large number of high-quality biased news articles can be generated using controllable text generation. A subjective evaluation with 80 participants demonstrated that the generated biased news is generally fluent, and a bias evaluation with 24 participants demonstrated that the bias (left or right) is usually evident in the generated articles and can be easily identified.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Recent advancements in natural language generation has raised serious concerns. Highperformance language models are widely used for language generation tasks because they are able to produce fluent and meaningful sentences. These models are already being used to create fake news. They can also be exploited to generate biased news, which can then be used to attack news aggregators to change their reader's behavior and influence their bias. In this paper, we use a threat model to demonstrate that the publicly available language models can reliably generate biased news content based on an input original news. We also show that a large number of high-quality biased news articles can be generated using controllable text generation. A subjective evaluation with 80 participants demonstrated that the generated biased news is generally fluent, and a bias evaluation with 24 participants demonstrated that the bias (left or right) is usually evident in the generated articles and can be easily identified.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Natural language generation is defined as the creation of understandable text using a language model (LM) trained on a large collection of texts. An (LM) is a probability distribution over a sequence of words. Given a set of training text sequences, we can train an LM to produce texts similar to the training data. Researchers have used deep learning algorithms to generate more fluent and semantically meaningful texts than those generated using conventional methods like n-grams (Lu et al., 2018) . Such LMs are being used to generate image captions (Vinyals et al., 2015) , perform machine translations (Bahdanau et al., 2015) , paraphrase and summarize text (Zhang et al., 2017) . High performance LMs can generate fake news, fake reviews, and fake comments (Adelani et al., 2020; Zellers et al., 2019) .", |
| "cite_spans": [ |
| { |
| "start": 482, |
| "end": 499, |
| "text": "(Lu et al., 2018)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 553, |
| "end": 575, |
| "text": "(Vinyals et al., 2015)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 607, |
| "end": 630, |
| "text": "(Bahdanau et al., 2015)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 663, |
| "end": 683, |
| "text": "(Zhang et al., 2017)", |
| "ref_id": "BIBREF39" |
| }, |
| { |
| "start": 763, |
| "end": 785, |
| "text": "(Adelani et al., 2020;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 786, |
| "end": 807, |
| "text": "Zellers et al., 2019)", |
| "ref_id": "BIBREF38" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Recent studies have revealed various types of bias in top US news sources, which often report political news in a biased way, for example, attention can be drawn to particular events and entities while ignoring others (Ribeiro et al., 2018; Groseclose and Milyo, 2005; Kulshrestha et al., 2017) . The selection of what to report about an entity (positive or negative) produces bias. There are two major political sides in the U.S.: Democrats on the left and Republicans on the right.", |
| "cite_spans": [ |
| { |
| "start": 218, |
| "end": 240, |
| "text": "(Ribeiro et al., 2018;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 241, |
| "end": 268, |
| "text": "Groseclose and Milyo, 2005;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 269, |
| "end": 294, |
| "text": "Kulshrestha et al., 2017)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The news aggregating platforms like Google News and Yahoo News are the most viewed news websites in U.S. with 150 and 175 million unique visitors every month, respectively (Watson, 2019) . They offer content relevant to a wide range of global audiences, and therefore, they have a responsibility to maintain the same sentiment and bias. However, they can utilize language models to generate biased content (news headlines and articles) to model the behavior of their readers. Exposure to biased news is very harmful as it can increase/flip the political bias of a reader (Bail et al., 2018) . For example, (Wong, 2019) found that exposure to biased news can alter the political inclinations of people, and (Wanta and Hu, 1994) found that false representation of news from a news source can lead to broken trust between the reader and the news source.", |
| "cite_spans": [ |
| { |
| "start": 172, |
| "end": 186, |
| "text": "(Watson, 2019)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 571, |
| "end": 590, |
| "text": "(Bail et al., 2018)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 606, |
| "end": 618, |
| "text": "(Wong, 2019)", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 706, |
| "end": 726, |
| "text": "(Wanta and Hu, 1994)", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Previous works on media bias mostly focused on detecting bias either by using cues from the social media presence of the news sources (Kulshrestha et al., 2017; Ribeiro et al., 2018) , or by analyzing how bias is manifested within each news article . focused on flipping the bias of news headlines, which is a short one line text. Bail et al. (2018) showed that exposure to opposing views can increase political polarization. To the best of our knowledge, ours is the first attempt at gener- Figure 1 : Proposed threat model. Original news is used as seed by the \"Biased News Generator\" (explained in Section 4) to generate left or right biased news. Readers are then exposed to the generated biased news to change their original bias (either flip or increase).", |
| "cite_spans": [ |
| { |
| "start": 134, |
| "end": 160, |
| "text": "(Kulshrestha et al., 2017;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 161, |
| "end": 182, |
| "text": "Ribeiro et al., 2018)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 331, |
| "end": 349, |
| "text": "Bail et al. (2018)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 492, |
| "end": 500, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "ating full length biased news articles using high performance language models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Our Contribution. In this paper, we use a threat model (Figure 1 ) to demonstrate that publicly available language models can reliably generate biased news content based on original news. In an ideal scenario, a user consumes original news from an aggregator and develops a confirmation bias (Nickerson, 1998) about entities mentioned in the news. If the news complements their bias, they likely jump to the original source to continue reading (Swire et al., 2017 ). Our threat model, we assume that the attacker is able to access the original news and have control over what a user will see when visiting the aggregator's platform. In this scenario, the attacker can rework the original news, by either shifting its bias farther than it originally was (Levendusky, 2013), or by flipping its original bias (Bail et al., 2018) . The attacker is also assumed to be able to access a large collection of news articles labeled with the bias (left or right) to use for training an LM. The attacker uses the original news as input to the LM for using as context to generate biased news. Finally, the attacker exposes readers to the generated biased news.", |
| "cite_spans": [ |
| { |
| "start": 292, |
| "end": 309, |
| "text": "(Nickerson, 1998)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 444, |
| "end": 463, |
| "text": "(Swire et al., 2017", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 806, |
| "end": 825, |
| "text": "(Bail et al., 2018)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 55, |
| "end": 64, |
| "text": "(Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "To generate biased news, we fine-tuned the GPT-2 language model (LM) to create two different LMs, each trained on a specific type of biased news. We used an API built on a RoBERTa-based model (Liu et al., 2019 ) (explained in a later section) to classify the generated news as left or right biased. However, generating only the text for news is not enough.", |
| "cite_spans": [ |
| { |
| "start": 192, |
| "end": 209, |
| "text": "(Liu et al., 2019", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Therefore, we then fine-tuned another generative model, known as GROVER (Zellers et al., 2019) , which enables controllable generation of an entire news article -the body, title, news source, publication date, and author list. Finally, we performed a subjective evaluation with 80 participants -32 native and 48 non-native English speakers. The results show that the news articles generated by the models (machine-generated news) had almost the same fluency as those written by people (human-written). The participants tended to randomly select human-written news when asked to choose between two options: an excerpt from machine-generated news, and one from humanwritten news. Then we choose 24 of the 80 participants to evaluate the bias in the machine-generated news articles. They were able identify a bias 92% of the times, and assigned a correct bias rating 62.91% of the time.", |
| "cite_spans": [ |
| { |
| "start": 72, |
| "end": 94, |
| "text": "(Zellers et al., 2019)", |
| "ref_id": "BIBREF38" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this section, we discuss related work on political bias datasets, bias analysis, bias generation and detection in news articles.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In the works that study bias, Arapakis et al. (2016) collected a dataset of 561 news articles, each being labeled with 14 qualitative aspects along with article's subjectivity. Another dataset, the multi-perspective question answering (MPQA) corpus (Wiebe et al., 2005) , contains 692 news articles, each with a label of its subjectivity. These two corpora were carefully developed with labels at the article and sentence levels. However, the labeling technique is costly to scale, and the corpora are not large enough (< 1000 samples), so developed a corpus of 2,781 events from the AllSides website to characterize and flip bias in news headlines. The corpus contains news headlines and articles presented by a left-leaning and a right-leaning news source paired together with an unbiased summary of the event. However, the labeling is news source specific, so there is no information about the bias at the article level. Moreover, the corpus is not large enough to be used to generate news articles. Therefore, for this study, we used the \"All-The-News\" dataset footnotehttps://www.kaggle.com/snapcrack/allthe-news.", |
| "cite_spans": [ |
| { |
| "start": 30, |
| "end": 52, |
| "text": "Arapakis et al. (2016)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 249, |
| "end": 269, |
| "text": "(Wiebe et al., 2005)", |
| "ref_id": "BIBREF35" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Political Bias Datasets", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Media bias has been under study for decades (Groseclose and Milyo, 2005; Fang et al., 2012; Arapakis et al., 2016) , and various aspects of political bias have been studied from different perspectives. For example, Groseclose and Milyo (2005) quantified bias for a sample of 20 news sources in the U.S. on the basis of the number of citations used by think tanks and policy groups. Their work is among the first ones to provide clear evidence of bias in media. Lin et al. (2011) proposed categorizing bias on the basis of variables like mentions of political parties, legislators, and ideology. Another study, , focused on liberal and conservative bias, and using manual annotation, found that bias indicators usually include named entities. A more recent study explored the idea with right and left bias, and experimentally showed that named entities are indeed important, and that bias is more evident in longer texts, i.e., in full length news articles, rather than in shorter texts like sentences and paragraphs . We performed the same analyses to evaluate the reliability of our dataset.", |
| "cite_spans": [ |
| { |
| "start": 44, |
| "end": 72, |
| "text": "(Groseclose and Milyo, 2005;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 73, |
| "end": 91, |
| "text": "Fang et al., 2012;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 92, |
| "end": 114, |
| "text": "Arapakis et al., 2016)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 215, |
| "end": 242, |
| "text": "Groseclose and Milyo (2005)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 461, |
| "end": 478, |
| "text": "Lin et al. (2011)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Bias Analysis", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Advances in natural language processing have led to rapid development of several language generation techniques. With the release of transformer based model architectures and text representations (Vaswani et al., 2017; Devlin et al., 2018) , machines are now able to generate high quality text outputs , which may or may not preserve the context. To generate text that better preserves context, researchers have studied controllable text generation, i.e., how to rewrite a text so that it has certain attributes (Keskar et al., 2019; Zellers et al., 2019) . Several of these studies demonstrated that the text style can be transferred by simply changing the relevant words in an unsupervised manner (Li et al., 2018; Adelani et al., 2020; Shen et al., 2017) . demonstrated bias flipping in text, but only for the headlines of a news articles. To the best of our knowledge, ours is the first study on generating full-length biased news articles.", |
| "cite_spans": [ |
| { |
| "start": 196, |
| "end": 218, |
| "text": "(Vaswani et al., 2017;", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 219, |
| "end": 239, |
| "text": "Devlin et al., 2018)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 512, |
| "end": 533, |
| "text": "(Keskar et al., 2019;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 534, |
| "end": 555, |
| "text": "Zellers et al., 2019)", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 699, |
| "end": 716, |
| "text": "(Li et al., 2018;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 717, |
| "end": 738, |
| "text": "Adelani et al., 2020;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 739, |
| "end": 757, |
| "text": "Shen et al., 2017)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Biased Headline Generation", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "There have been several attempts in the past to identify bias as left or right at the article level (Zhao et al.; Baly et al., 2018; Wang, 2019) , and at the source level (Ribeiro et al., 2018; Kulshrestha et al., 2017; . The classification of a media source as left leaning or right leaning is flawed if one starts to look at each article to identify its bias. We are more interested in the text and style of bias in news articles, and therefore, we focused on bias at the article level. At article level, Zhao et al.; Baly et al. 2018used a smaller dataset and shallow models to classify bias at an article level using three labels. Using recent advancements in the field of natural language processing, Wang (2019) created a state-of-the-art regression model to quantify bias in news articles by using RoBERTa-based model (Liu et al., 2019) and trained it on several datasets like the Adfontes-Media's list of articles and webhose.io 1 , and so on for generalizability. We used the RoBERTa-based model to generate automatic bias ratings and evaluate bias in generated text.", |
| "cite_spans": [ |
| { |
| "start": 114, |
| "end": 132, |
| "text": "Baly et al., 2018;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 133, |
| "end": 144, |
| "text": "Wang, 2019)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 171, |
| "end": 193, |
| "text": "(Ribeiro et al., 2018;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 194, |
| "end": 219, |
| "text": "Kulshrestha et al., 2017;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 825, |
| "end": 843, |
| "text": "(Liu et al., 2019)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Identification of Bias in News Articles", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "3 Dataset and Discriminativeness Ratio", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Identification of Bias in News Articles", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "The dataset we used is a collection of 139,668 full length news articles curated using the Internet Archive 2 from 15 major news sources in the U.S. and is available on the Kaggle website under the name of \"All the news\" data 3 . For each source, the Internet Archive was used to grab the past year-and-a-half of either homepage headlines or RSS feeds and their links were parsed through a scraper. The data obtained were not the product of scraping an entire site, but rather of scraping the more prominently placed articles. For example, CNN's articles from 5 June 2016 were what appeared on the homepage of CNN at the time of data collection. Similarly, Vox's articles from that time were everything that appeared in the Vox RSS reader, and so on. Therefore, we had a news article with its headline, publication source, publication date, and full-length body. The collection of news articles did not have its bias ratings at the article level. We used a RoBERTa-based regression model made available to us upon requesting to \"The Bipartisan Press\" 4 to create bias ratings. \"The Bipartisan Press\" annotated the data using Adfontes Media's methodology (Otero, 2019), which involves an initial screening and training to hire experts to annotate news articles with their bias on a scale of -42 to +42. A negative sign indicates a left-leaning bias and a positive sign indicates a right-leaning bias. We used the regression model to calculate the bias in each news article and treated these bias ratings as the ground truth. We further used the same model to evaluate the bias of the generated news articles. Table 1 lists some statistics about the \"All the news\" dataset. ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 1608, |
| "end": 1615, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "All The News Dataset and Automatic Bias Ratings", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Bias can be found in a text if it expresses sentiment towards a specific entity ( a person, a place, or a policy). proposed a discriminativeness ratio to capture the fundamental difference between biased and sentimental text based on word frequency. The ratio is given as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discriminativeness Ratio", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "occ(w, D t ) occ(w, D t )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discriminativeness Ratio", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "where occ(w, D) is the frequency of w in text D and t and t are types of text. In biased text, t and t correspond to right and left, while in sentimental text they represent positive and negative sentiments, respectively. Usage of the discriminativeness ratio results in type-unrelated words having values close to 1, as they appear almost equally in both types of text. On the other hand, words that appear often in one type and rarely in the other will have higher (type t) and lower values (type t ) values, respectively. Table 2 : Three words with highest and lowest discriminativeness ratio, and words with ratio very close to one. Table 2 lists the words having the highest and the words having the lowest discriminativeness ratio for sentimental text and biased text. We show the results for sentimental text to simplify the explanation. The top three words in the sentimental text are positive, the bottom three are negative, and sentiment-unrelated words have a value close to one. In the biased text, the three type-unrelated words (ratio of 1.0) included both positive (\"aired\" and \"recused\") and negative (\"suspicion\") sentiment words. This is because both left-and rightbiased texts use sentiment words to support and oppose entities. In addition, the top three and the bottom three biased-text words are named entities, indicating that articles with either bias tend to criticize or support different named entities, using the same words to convey sentiments. In line with this, a bias analysis by Yano et al. (2010) revealed that named entities are often bias indicators.", |
| "cite_spans": [ |
| { |
| "start": 1512, |
| "end": 1530, |
| "text": "Yano et al. (2010)", |
| "ref_id": "BIBREF37" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 525, |
| "end": 532, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 637, |
| "end": 644, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Discriminativeness Ratio", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The most important parts of the proposed method for generating biased news is the GPT-2 text gen-eration model and the controllable text generation model (Zellers et al., 2019) . As shown in Figure 2 , we used a two step approach to generate biased news: generation and validation. In the generation step, an attacker provides an original news article x as the seed input to a generation models. The models then generate a modified article x' based on x. In the validation step, the generated articles are classified on the basis of bias. The attacker is assumed to have access to such a classifier and uses it to segregate left-and right-biased news. The details of our proposed method are discussed below. ", |
| "cite_spans": [ |
| { |
| "start": 154, |
| "end": 176, |
| "text": "(Zellers et al., 2019)", |
| "ref_id": "BIBREF38" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 191, |
| "end": 199, |
| "text": "Figure 2", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Biased News Generation", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The task of a language model is to learn the probability distribution of a text corpus to enable the next word to be predicted on the basis of contextual words. Given a sequence of words, w = (w 1 , w 2 , ..., w T ), the probability of the sequence is given as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "GPT-2 Model", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P (w) = T i=1 P (w t |w 1 , w 2 , ..., w t\u22121 )", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "GPT-2 Model", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Probability P(w) is calculated by learning the conditional probability of each word given a fixed number of k-context words. Many neural network architectures have been used to estimate P(w) including a feed-forward neural network (Bengio et al., 2003) , a recurrent neural network (Mikolov et al., 2010; Sundermeyer et al., 2012) , and the transformer architectures (Radford et al., 2018) . A GPT-2 model based on a transformer architecture has been shown to have a lower perplexity for language modeling datasets, and to generate high quality fluent texts. Therefore, we used a GPT-2 model and fine tuned it on a dataset of left-and right-biased news.", |
| "cite_spans": [ |
| { |
| "start": 231, |
| "end": 252, |
| "text": "(Bengio et al., 2003)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 282, |
| "end": 304, |
| "text": "(Mikolov et al., 2010;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 305, |
| "end": 330, |
| "text": "Sundermeyer et al., 2012)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 367, |
| "end": 389, |
| "text": "(Radford et al., 2018)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "GPT-2 Model", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "In the fine-tuning, the model was first initialized using pre-trained weights instead of random initial weights. Fine-tuning is faster than training an LM with a large number of parameters from scratch. It has been shown that fine-tuning using labeled data after initializing the model with pre-trained parameters improves the accuracy of downstream tasks (Devlin et al., 2018) . Therefore, we fine tuned the GPT-2 LM using left-biased and right-biased news.", |
| "cite_spans": [ |
| { |
| "start": 356, |
| "end": 377, |
| "text": "(Devlin et al., 2018)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "GPT-2 Model", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Using techniques from Zhang et al. 2015, we divided the news articles from each set into training and test sets. We used a reliable implementation of the GPT-2 model available on Github 5 to fine-tune the pre-trained model on the \"All the news\" dataset. We used the default values for all hyperparameters. The number of training samples for left-and right-biased media were unbalanced, but since we trained a separate model for each, we had enough data for fine-tuning two good models. We fine tuned two 117M GPT-2 models, one for each type of bias. We used 85,664 and 44,004 news articles, respectively, to train the two models and 5000 each to test them for perplexity. The perplexity on the test set for the two models trained was 17.43 and 18.30, respectively, which is quite good (i.e., value less than 20 is what we look for ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "GPT-2 Model", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Finally, we generated 5000 samples for each bias type. We loaded the corresponding model and used prompts from the original articles to generate biased ones. Table 3 shows a sample for each type of bias. The generated articles are fluent and meaningful. The generated news is \"fake\" and reports incorrect factual information. For example, in the first sample, the original news has entities like {U.S. farm industry, White House} while the generated one completely changed them to {U.S. Economy, San Jose}.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 158, |
| "end": 165, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "GPT-2 Model", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Seed Text: President Donald Trump said he would seek to keep his tough immigration enforcement policies from harming the U. S. farm industry and its largely immigrant workforce, according to farmers and officials who met with him. At a roundtable on farm labor at the White House last month, Trump said he did not want to create labor problems for farmers and would look into improving a program that brings in temporary agricultural workers on legal visas. President Donald Trump said he would seek to keep his tough immigration enforcement policies from harming the U. S. economy, and that the 25 countries with \"green cards\" had jobs to farm industry and its largely immigrant workforce according to farmers and officials who met with him last week in San Jose. (Bias: 5.0) Left Biased Seed Text: Hillary Clinton and her team got in on the challenge, too, offering a special message to voters to \"Don't Stand Still, Vote Today. \". Her video features cameos from former President Bill Clinton along with Huma Abedin and Jon Bon Jovi. The original video had no soundtrack, so Slim Jxmmi of Rae Sremmurd added in their track \"Black Beatles\" to the Clinton version. Pretty hip. Hillary Clinton and her team got in on the challenge too offering a special message to voters to Dont Stand Still Vote Today and Trump's campaign just couldn't do anything better. Her video features cameos from former President Bill Clinton along with Huma Abedin and Jon Bon Jovi. The clip was posted on YouTube in reaction to Trump's comments, which BuzzFeed News reported. (Bias: -13.0) Table 3 : Example biased news generated using fine-tuned GPT-2 LM. For the sake of brevity, only the first three sentences of original and generated articles are presented (Grusky et al., 2018) . Generated text is shown in italics. : Example biased news generated using fine-tuned GROVER LM. 
For the sake of brevity, only the first three sentences of original and generated articles are presented (Grusky et al., 2018) . Generated text is shown in italics.", |
| "cite_spans": [ |
| { |
| "start": 1739, |
| "end": 1760, |
| "text": "(Grusky et al., 2018)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 1964, |
| "end": 1985, |
| "text": "(Grusky et al., 2018)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1567, |
| "end": 1574, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Right Biased", |
| "sec_num": null |
| }, |
| { |
| "text": "The news articles generated by the GPT-2 model contain unstructured text, beginning with a start token and ending with an end token. The end token is particularly important as it in-dicates when to stop generating. However, in addition to unstructured running text, i.e., the body text, a news article has additional elements, including the publication domain, the publication date,the authors, and the headline. Generating a realistic and controlled news article requires producing all of these components. Therefore, a news article can be modeled as a joint distribution: P (domain, date, authors, headline, body) (2) Zellers et al. (2019) used the language modeling framework from equation 1 in a way that enables flexible decomposition of equation 2. GROVER starts with a set of fields F as context, with each field containing specific start and end tokens. To generate a target field \u03c4 , we append the field specific start \u2212 \u03c4 to the given context tokens to sample from the model until the end \u2212 \u03c4 token is reached. For biased news generation, we fix the body of the article as the target field \u03c4 and use the other fields (domain, date, authors, headline) as context. We load pre-trained model weights to fine tunethe GROVER LM to generate biased news.", |
| "cite_spans": [ |
| { |
| "start": 620, |
| "end": 641, |
| "text": "Zellers et al. (2019)", |
| "ref_id": "BIBREF38" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "GROVER model", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We used the same training-test distribution as for the GPT-2 model. We defined context F as the set {headline, date, author[s], domain} and target \u03c4 as the body of the article to be generated using F as context. Note that, GROVER does not need seed phrases for generation. Instead, it uses headline, date, author[s], and domain for generating the body. Table 4 shows a sample for each type of bias. The generated articles are fluent and appear consistent as they are presented with a domain, date, headline and author[s] names. Figure 3 shows the bias distributions for all the 5000 generated articles, reflecting the bias of each source. As can be clearly seen, the distributions are shifted towards the extremes for both the leftand right-biased samples, shown by the bumps being closer to the left extreme (-20) or the right extreme (+20).", |
| "cite_spans": [ |
| { |
| "start": 809, |
| "end": 814, |
| "text": "(-20)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 353, |
| "end": 360, |
| "text": "Table 4", |
| "ref_id": "TABREF4" |
| }, |
| { |
| "start": 528, |
| "end": 536, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "GROVER model", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "To subjectively evaluate our proposed method, we asked a pool of native and non-native English speakers (annotators) to evaluate the generated biased news articles on the basis of fluency and the bias of the text. We explicitly instructed them to ignore factuality because we wanted to evaluate and validate the quality and bias of the generated articles, not their correctness.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Subjective Evaluation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "For evaluating quality, we considered two categories of articles: human-written ones from news sources, and machine-generated ones produced by the GPT-2 or GROVER models. The participants were asked to identify whether an excerpt was taken from a human-written, or a machinegenerated article. They were shown two options to choose from, one from each class, humanwritten and machine-generated. Each annotator was shown ten pairs of excerpts (one humanwritten and one machine-generated) and asked to identify, which was the human-written one. The average selection rate was used as the metric. Fur-(a) Bias Distribution in Human Written News (b) Bias Distribution in Machine Generated News Figure 3 : Difference in bias ratings between human-written and machine generated news (using human-written news as seed for each generation). The machine-generated news is more extreme (biased) due to being generated by fine tuned models.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 689, |
| "end": 697, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Subjective Evaluation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "ther, to facilitate the evaluation, the excerpts were shortened to only three or four sentences. The evaluations were performed on a web interface with the two types of excerpts chosen randomly from two pools of samples. Of the 80 participants, 32 were native speakers and the rest 48 were non-native speakers. As shown in Table 5 , the non-native English speakers tended to mark the machine-generated excerpts as human-written ones. Since the outputs from the GPT-2 and GROVER models were very similar, the ratio of participants who failed to identify the human-written news correctly was about the same for the GPT-2-and GROVER-generated samples. The lowest ratio (43%) was for native speakers and the GROVER samples, and the highest ratio (50%) was for non-native speakers and the GPT-2 samples. Most of the values are closer to 0.50, which indicates that the participants tended to make a random selection among the two categories of articles.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 323, |
| "end": 330, |
| "text": "Table 5", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Subjective Evaluation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Native Non Native Overall GPT-2 0.46 (16) 0.50 (23) 0.49 (39) GROVER 0.43 160.48 (25) 0.46 (41) For evaluating bias, we selected 24 of the 80 participants, each having at least a college degree or who were enrolled in college at the time of annotation. We trained them to understand the media bias using various resources 6 . Since the training was not rigorous, we made the problem simpler by treating bias as a binary variable having two values, i.e., left and right. For cases in which the participant was not sure, we asked them to mark the question with can't say. Each participant was shown ten excerpts at random from the generated text and they were asked to mark their bias rating. As in the quality evaluation, only three or four sentences were shown for the sake of simplicity.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": null |
| }, |
| { |
| "text": "The participants were able to identify a clear bias 92% of the times. They marked the option of can't say only 8% of the time. To determine the percentage of times the participants were able to identify the bias correctly, we needed to define \"correctly\", which is subjective. We judged that a bias rating was correct if the participant's choice 6 https://www.coursera.org/learn/media (left or right) matched that of the automatic bias evaluation . We used the API built on a RoBERTabased model to automatically generate bias ratings for the sample excerpts shown to the participants. We found that the participants were able to identify the bias correctly 63% of the time. The percentage might have been higher with more training and a better understanding of bias.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": null |
| }, |
| { |
| "text": "Our use of the API made available to us by \"The Bipartsan Press\" to evaluate bias is a major limitation of this study. Evaluating text for bias is a very complex problem. The API was built on a RoBERTa based model trained on a dataset curated by Adfontes Media. The dataset was annotated by 20 expert annotators with at least a college degree after an extensive screening and training process 7 . Hiring and training such annotators is expensive, and relying on non-expert annotators to calculate media bias in generated news is not promising. Since our findings conforms to the results reported by relevant literature on media bias, it is safe to assume that the results obtained using the RoBERTa-based model (with a 4% error rate) are reliable in terms of segregating left-biased media from right-biased media.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We have presented a threat model and discussed how news aggregators (attackers) can manipulate readers' opinions by flipping or increasing their bias. We described two language models generating biased news: the high-performance GPT-2 LM and the GROVER LM for controllable text generation. We used a large news article dataset to fine tune them. We used a RoBERTa-based regression model to create automatic bias ratings and to evaluate bias in generated news. Subjective evaluation of generated news articles by 80 participants suggests that they made random selections between the machine-generated and human-written news excerpts, indicating that the machine-generated news is fluent and looks similar to human-written news. Out of the 80 participants, 24 were chosen for a bias evaluation. The participants were able to see a clear bias most of the times, and marked correct bias 63% of the times.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "For future work, techniques for a more granular control on text generation can be explored, where one can adversarially inject bias to generate twisted versions of news stories. Techniques to introduce bias during machine translation of a news article from one language to another can be explored and evaluated by comparing the generated news after translation with the news generated by non-native speakers while converting news from other languages. Apart from named entities and sentence length, there are more intrinsic patterns representing presence of bias in text. Exploration studies to find such patterns can also be done in future to better understand bias distribution in text. Another future direction can be to quantify the impact of delivering biased news to real-world users using some social media platform.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Sometimes biased text segments can be identified just by looking into the title (i.e. only one sentence), as we go along the bias may or may not increase. Intuitively, as we increase the length of text tested for presence of bias, the bias should also increase.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.1 Granularity Analysis", |
| "sec_num": null |
| }, |
| { |
| "text": "We have taken equal number of samples, i.e. 5,000, from both sides of bias. To test this hypothesis, we divided the news into 4 parts: sentence-1, which is just the title; sentence-3, first three sentences of news article (Grusky et al., 2018 )(Lede-3); sentence-10, first 10 sentences of the news article ; and finally full-length, which represents the complete news. Figure 4 shows that bias ratings increase as we increase the length of news being tested for bias. Figure 4 : Granularity Analysis. The bias ratings increase as we increase the length of text to test for bias infestation.", |
| "cite_spans": [ |
| { |
| "start": 222, |
| "end": 242, |
| "text": "(Grusky et al., 2018", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 369, |
| "end": 377, |
| "text": "Figure 4", |
| "ref_id": null |
| }, |
| { |
| "start": 468, |
| "end": 476, |
| "text": "Figure 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "A.1 Granularity Analysis", |
| "sec_num": null |
| }, |
| { |
| "text": "We evaluated three automatic detection models, GLTR (Gehrmann et al., 2019) , GROVER (Zellers et al., 2019) , and GPT-2 PD (Solaiman et al., 2019) using 80 samples (news excerpts) each from human written, GPT-2 generated, and GROVER generated news. GLTR gives different probabilities of words being in top10, top100, and so on, and the other models give a probability score. We have used regression models as fusion functions while predicting with combined models. Table 6 shows detection results.", |
| "cite_spans": [ |
| { |
| "start": 52, |
| "end": 75, |
| "text": "(Gehrmann et al., 2019)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 85, |
| "end": 107, |
| "text": "(Zellers et al., 2019)", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 123, |
| "end": 146, |
| "text": "(Solaiman et al., 2019)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 465, |
| "end": 472, |
| "text": "Table 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "A.2 Automatic Detection", |
| "sec_num": null |
| }, |
| { |
| "text": "GPT-2 Generated GROVER Generated Overall Table 6 : Equal error rate in differentiating between human written and machine generated news. We have used three approaches independently as well as a combination of them. \"+\" indicates score fusion.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 41, |
| "end": 48, |
| "text": "Table 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Detector", |
| "sec_num": null |
| }, |
| { |
| "text": "http://webhose.io/ 2 https://archive.org 3 https://www.kaggle.com/snapcrack/all-the-news", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://www.thebipartisanpress.com/political-bias-apiand-integrations/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://github.com/huggingface/transformers", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://www.adfontesmedia.com/how-ad-fontes-ranksnews-sources/?v=402f03a963ba", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This research was partly supported by JST CREST Grant JPMJCR18A6 and JSPS KAKENHI Grant JP16H06302 and JP18H04120, Japan.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Generating sentiment-preserving fake online reviews using neural language models and their human-and machine-based detection", |
| "authors": [ |
| { |
| "first": "Haotian", |
| "middle": [], |
| "last": "David Ifeoluwa Adelani", |
| "suffix": "" |
| }, |
| { |
| "first": "Fuming", |
| "middle": [], |
| "last": "Mai", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Fang", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Huy", |
| "suffix": "" |
| }, |
| { |
| "first": "Junichi", |
| "middle": [], |
| "last": "Nguyen", |
| "suffix": "" |
| }, |
| { |
| "first": "Isao", |
| "middle": [], |
| "last": "Yamagishi", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Echizen", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "International Conference on Advanced Information Networking and Applications", |
| "volume": "", |
| "issue": "", |
| "pages": "1341--1354", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Ifeoluwa Adelani, Haotian Mai, Fuming Fang, Huy H Nguyen, Junichi Yamagishi, and Isao Echizen. 2020. Generating sentiment-preserving fake online reviews using neural language models and their human-and machine-based detection. In International Conference on Advanced Information Networking and Applications, pages 1341-1354. Springer.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Visualizing media bias through twitter", |
| "authors": [ |
| { |
| "first": "Jisun", |
| "middle": [], |
| "last": "An", |
| "suffix": "" |
| }, |
| { |
| "first": "Meeyoung", |
| "middle": [], |
| "last": "Cha", |
| "suffix": "" |
| }, |
| { |
| "first": "Krishna", |
| "middle": [], |
| "last": "Gummadi", |
| "suffix": "" |
| }, |
| { |
| "first": "Jon", |
| "middle": [], |
| "last": "Crowcroft", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniele", |
| "middle": [], |
| "last": "Quercia", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Sixth International AAAI Conference on Weblogs and Social Media", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jisun An, Meeyoung Cha, Krishna Gummadi, Jon Crowcroft, and Daniele Quercia. 2012. Visualizing media bias through twitter. In Sixth International AAAI Conference on Weblogs and Social Media.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Linguistic benchmarks of online news article quality", |
| "authors": [ |
| { |
| "first": "Ioannis", |
| "middle": [], |
| "last": "Arapakis", |
| "suffix": "" |
| }, |
| { |
| "first": "Filipa", |
| "middle": [], |
| "last": "Peleja", |
| "suffix": "" |
| }, |
| { |
| "first": "Barla", |
| "middle": [], |
| "last": "Berkant", |
| "suffix": "" |
| }, |
| { |
| "first": "Joao", |
| "middle": [], |
| "last": "Magalhaes", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1893--1902", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ioannis Arapakis, Filipa Peleja, Barla Berkant, and Joao Magalhaes. 2016. Linguistic benchmarks of online news article quality. In Proceedings of the 54th Annual Meeting of the Association for Compu- tational Linguistics (Volume 1: Long Papers), pages 1893-1902.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Neural machine translation by jointly learning to align and translate", |
| "authors": [ |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Bengio. 2015. Neural machine translation by jointly learning to align and translate. CoRR, abs/1409.0473.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Exposure to opposing views on social media can increase political polarization", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Christopher", |
| "suffix": "" |
| }, |
| { |
| "first": "Lisa", |
| "middle": [ |
| "P" |
| ], |
| "last": "Bail", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Argyle", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Taylor", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Brown", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "John", |
| "suffix": "" |
| }, |
| { |
| "first": "Haohan", |
| "middle": [], |
| "last": "Bumpus", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaemin", |
| "middle": [], |
| "last": "Mb Fallin Hunzaker", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcus", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Friedolin", |
| "middle": [], |
| "last": "Mann", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Merhout", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Volfovsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the National Academy of Sciences", |
| "volume": "115", |
| "issue": "37", |
| "pages": "9216--9221", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christopher A Bail, Lisa P Argyle, Taylor W Brown, John P Bumpus, Haohan Chen, MB Fallin Hunza- ker, Jaemin Lee, Marcus Mann, Friedolin Merhout, and Alexander Volfovsky. 2018. Exposure to op- posing views on social media can increase political polarization. Proceedings of the National Academy of Sciences, 115(37):9216-9221.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Predicting factuality of reporting and bias of news media sources", |
| "authors": [ |
| { |
| "first": "Ramy", |
| "middle": [], |
| "last": "Baly", |
| "suffix": "" |
| }, |
| { |
| "first": "Georgi", |
| "middle": [], |
| "last": "Karadzhov", |
| "suffix": "" |
| }, |
| { |
| "first": "Dimitar", |
| "middle": [], |
| "last": "Alexandrov", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Glass", |
| "suffix": "" |
| }, |
| { |
| "first": "Preslav", |
| "middle": [], |
| "last": "Nakov", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1810.01765" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ramy Baly, Georgi Karadzhov, Dimitar Alexandrov, James Glass, and Preslav Nakov. 2018. Predict- ing factuality of reporting and bias of news media sources. arXiv preprint arXiv:1810.01765.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "A neural probabilistic language model", |
| "authors": [ |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "R\u00e9jean", |
| "middle": [], |
| "last": "Ducharme", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascal", |
| "middle": [], |
| "last": "Vincent", |
| "suffix": "" |
| }, |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Jauvin", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Journal of machine learning research", |
| "volume": "3", |
| "issue": "", |
| "pages": "1137--1155", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yoshua Bengio, R\u00e9jean Ducharme, Pascal Vincent, and Christian Jauvin. 2003. A neural probabilistic lan- guage model. Journal of machine learning research, 3(Feb):1137-1155.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Learning to flip the bias of news headlines", |
| "authors": [ |
| { |
| "first": "Wei-Fan", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Henning", |
| "middle": [], |
| "last": "Wachsmuth", |
| "suffix": "" |
| }, |
| { |
| "first": "Khalid", |
| "middle": [ |
| "Al" |
| ], |
| "last": "Khatib", |
| "suffix": "" |
| }, |
| { |
| "first": "Benno", |
| "middle": [], |
| "last": "Stein", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 11th International Conference on Natural Language Generation", |
| "volume": "", |
| "issue": "", |
| "pages": "79--88", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wei-Fan Chen, Henning Wachsmuth, Khalid Al Khatib, and Benno Stein. 2018. Learning to flip the bias of news headlines. In Proceedings of the 11th International Conference on Natural Language Generation, pages 79-88.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1810.04805" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understand- ing. arXiv preprint arXiv:1810.04805.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Mining contrastive opinions on political texts using cross-perspective topic model", |
| "authors": [ |
| { |
| "first": "Yi", |
| "middle": [], |
| "last": "Fang", |
| "suffix": "" |
| }, |
| { |
| "first": "Luo", |
| "middle": [], |
| "last": "Si", |
| "suffix": "" |
| }, |
| { |
| "first": "Naveen", |
| "middle": [], |
| "last": "Somasundaram", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhengtao", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the fifth ACM international conference on Web search and data mining", |
| "volume": "", |
| "issue": "", |
| "pages": "63--72", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yi Fang, Luo Si, Naveen Somasundaram, and Zhengtao Yu. 2012. Mining contrastive opinions on political texts using cross-perspective topic model. In Pro- ceedings of the fifth ACM international conference on Web search and data mining, pages 63-72.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Gltr: Statistical detection and visualization of generated text", |
| "authors": [ |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Gehrmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Hendrik", |
| "middle": [], |
| "last": "Strobelt", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander M", |
| "middle": [], |
| "last": "Rush", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1906.04043" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sebastian Gehrmann, Hendrik Strobelt, and Alexan- der M Rush. 2019. Gltr: Statistical detection and visualization of generated text. arXiv preprint arXiv:1906.04043.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "A measure of media bias", |
| "authors": [ |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Groseclose", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Milyo", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "The Quarterly Journal of Economics", |
| "volume": "120", |
| "issue": "4", |
| "pages": "1191--1237", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tim Groseclose and Jeffrey Milyo. 2005. A measure of media bias. The Quarterly Journal of Economics, 120(4):1191-1237.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Newsroom: A dataset of 1.3 million summaries with diverse extractive strategies", |
| "authors": [ |
| { |
| "first": "Max", |
| "middle": [], |
| "last": "Grusky", |
| "suffix": "" |
| }, |
| { |
| "first": "Mor", |
| "middle": [], |
| "last": "Naaman", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Artzi", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1804.11283" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Max Grusky, Mor Naaman, and Yoav Artzi. 2018. Newsroom: A dataset of 1.3 million summaries with diverse extractive strategies. arXiv preprint arXiv:1804.11283.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Ctrl: A conditional transformer language model for controllable generation", |
| "authors": [ |
| { |
| "first": "Bryan", |
| "middle": [], |
| "last": "Nitish Shirish Keskar", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mccann", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Lav", |
| "suffix": "" |
| }, |
| { |
| "first": "Caiming", |
| "middle": [], |
| "last": "Varshney", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Xiong", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1909.05858" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nitish Shirish Keskar, Bryan McCann, Lav R Varshney, Caiming Xiong, and Richard Socher. 2019. Ctrl: A conditional transformer language model for control- lable generation. arXiv preprint arXiv:1909.05858.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Quantifying search bias: Investigating sources of bias for political searches in social media", |
| "authors": [ |
| { |
| "first": "Juhi", |
| "middle": [], |
| "last": "Kulshrestha", |
| "suffix": "" |
| }, |
| { |
| "first": "Motahhare", |
| "middle": [], |
| "last": "Eslami", |
| "suffix": "" |
| }, |
| { |
| "first": "Johnnatan", |
| "middle": [], |
| "last": "Messias", |
| "suffix": "" |
| }, |
| { |
| "first": "Muhammad", |
| "middle": [], |
| "last": "Bilal Zafar", |
| "suffix": "" |
| }, |
| { |
| "first": "Saptarshi", |
| "middle": [], |
| "last": "Ghosh", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Krishna", |
| "suffix": "" |
| }, |
| { |
| "first": "Karrie", |
| "middle": [], |
| "last": "Gummadi", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Karahalios", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2017 ACM Conference on Computer Supported Cooperative Work and Social Computing", |
| "volume": "", |
| "issue": "", |
| "pages": "417--432", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Juhi Kulshrestha, Motahhare Eslami, Johnnatan Mes- sias, Muhammad Bilal Zafar, Saptarshi Ghosh, Kr- ishna P Gummadi, and Karrie Karahalios. 2017. Quantifying search bias: Investigating sources of bias for political searches in social media. In Pro- ceedings of the 2017 ACM Conference on Computer Supported Cooperative Work and Social Computing, pages 417-432.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Why do partisan media polarize viewers?", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Matthew", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Levendusky", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "American Journal of Political Science", |
| "volume": "57", |
| "issue": "3", |
| "pages": "611--623", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew S Levendusky. 2013. Why do partisan me- dia polarize viewers? American Journal of Political Science, 57(3):611-623.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Delete, retrieve, generate: A simple approach to sentiment and style transfer", |
| "authors": [ |
| { |
| "first": "Juncen", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Robin", |
| "middle": [], |
| "last": "Jia", |
| "suffix": "" |
| }, |
| { |
| "first": "He", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Percy", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1804.06437" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Juncen Li, Robin Jia, He He, and Percy Liang. 2018. Delete, retrieve, generate: A simple approach to sentiment and style transfer. arXiv preprint arXiv:1804.06437.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "More voices than ever? quantifying media bias in networks", |
| "authors": [ |
| { |
| "first": "Yu-Ru", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [ |
| "P" |
| ], |
| "last": "Bagrow", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Lazer", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Fifth International AAAI Conference on Weblogs and Social Media", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yu-Ru Lin, James P Bagrow, and David Lazer. 2011. More voices than ever? quantifying media bias in networks. In Fifth International AAAI Conference on Weblogs and Social Media.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Neural text generation: Past, present and beyond", |
| "authors": [ |
| { |
| "first": "Sidi", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yaoming", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Weinan", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jun", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yong", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1803.07133" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sidi Lu, Yaoming Zhu, Weinan Zhang, Jun Wang, and Yong Yu. 2018. Neural text genera- tion: Past, present and beyond. arXiv preprint arXiv:1803.07133.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Recurrent neural network based language model", |
| "authors": [ |
| { |
| "first": "Tom\u00e1\u0161", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Karafi\u00e1t", |
| "suffix": "" |
| }, |
| { |
| "first": "Luk\u00e1\u0161", |
| "middle": [], |
| "last": "Burget", |
| "suffix": "" |
| }, |
| { |
| "first": "Jan", |
| "middle": [], |
| "last": "\u010cernock\u00fd", |
| "suffix": "" |
| }, |
| { |
| "first": "Sanjeev", |
| "middle": [], |
| "last": "Khudanpur", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Eleventh annual conference of the international speech communication association", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tom\u00e1\u0161 Mikolov, Martin Karafi\u00e1t, Luk\u00e1\u0161 Burget, Ja\u0148 Cernock\u1ef3, and Sanjeev Khudanpur. 2010. Recurrent neural network based language model. In Eleventh annual conference of the international speech com- munication association.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Confirmation bias: A ubiquitous phenomenon in many guises", |
| "authors": [ |
| { |
| "first": "Raymond", |
| "middle": [ |
| "S" |
| ], |
| "last": "Nickerson", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "Review of general psychology", |
| "volume": "2", |
| "issue": "2", |
| "pages": "175--220", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Raymond S Nickerson. 1998. Confirmation bias: A ubiquitous phenomenon in many guises. Review of general psychology, 2(2):175-220.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Display and analysis system for media content", |
| "authors": [ |
| { |
| "first": "Vanessa", |
| "middle": [ |
| "L" |
| ], |
| "last": "Otero", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vanessa L Otero. 2019. Display and analysis system for media content. US Patent App. 16/204,795.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Improving language understanding by generative pre-training", |
| "authors": [ |
| { |
| "first": "Alec", |
| "middle": [], |
| "last": "Radford", |
| "suffix": "" |
| }, |
| { |
| "first": "Karthik", |
| "middle": [], |
| "last": "Narasimhan", |
| "suffix": "" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Salimans", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alec Radford, Karthik Narasimhan, Tim Salimans, and Ilya Sutskever. 2018. Improving language understanding by generative pre-training. URL https://s3-us-west-2. amazonaws. com/openai- assets/researchcovers/languageunsupervised/language understanding paper. pdf.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Language models are unsupervised multitask learners", |
| "authors": [ |
| { |
| "first": "Alec", |
| "middle": [], |
| "last": "Radford", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Rewon", |
| "middle": [], |
| "last": "Child", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Luan", |
| "suffix": "" |
| }, |
| { |
| "first": "Dario", |
| "middle": [], |
| "last": "Amodei", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "OpenAI Blog", |
| "volume": "1", |
| "issue": "8", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever. 2019. Language models are unsupervised multitask learners. OpenAI Blog, 1(8):9.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Media bias monitor: Quantifying biases of social media news outlets at large-scale", |
| "authors": [ |
| { |
| "first": "Filipe", |
| "middle": [ |
| "N" |
| ], |
| "last": "Ribeiro", |
| "suffix": "" |
| }, |
| { |
| "first": "Lucas", |
| "middle": [], |
| "last": "Henrique", |
| "suffix": "" |
| }, |
| { |
| "first": "Fabricio", |
| "middle": [], |
| "last": "Benevenuto", |
| "suffix": "" |
| }, |
| { |
| "first": "Abhijnan", |
| "middle": [], |
| "last": "Chakraborty", |
| "suffix": "" |
| }, |
| { |
| "first": "Juhi", |
| "middle": [], |
| "last": "Kulshrestha", |
| "suffix": "" |
| }, |
| { |
| "first": "Mahmoudreza", |
| "middle": [], |
| "last": "Babaei", |
| "suffix": "" |
| }, |
| { |
| "first": "Krishna", |
| "middle": [ |
| "P" |
| ], |
| "last": "Gummadi", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Twelfth International AAAI Conference on Web and Social Media", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Filipe N Ribeiro, Lucas Henrique, Fabricio Ben- evenuto, Abhijnan Chakraborty, Juhi Kulshrestha, Mahmoudreza Babaei, and Krishna P Gummadi. 2018. Media bias monitor: Quantifying biases of social media news outlets at large-scale. In Twelfth International AAAI Conference on Web and Social Media.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Style transfer from non-parallel text by cross-alignment", |
| "authors": [ |
| { |
| "first": "Tianxiao", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Tao", |
| "middle": [], |
| "last": "Lei", |
| "suffix": "" |
| }, |
| { |
| "first": "Regina", |
| "middle": [], |
| "last": "Barzilay", |
| "suffix": "" |
| }, |
| { |
| "first": "Tommi", |
| "middle": [], |
| "last": "Jaakkola", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "6830--6841", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tianxiao Shen, Tao Lei, Regina Barzilay, and Tommi Jaakkola. 2017. Style transfer from non-parallel text by cross-alignment. In Advances in neural informa- tion processing systems, pages 6830-6841.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Release strategies and the social impacts of language models", |
| "authors": [ |
| { |
| "first": "Irene", |
| "middle": [], |
| "last": "Solaiman", |
| "suffix": "" |
| }, |
| { |
| "first": "Miles", |
| "middle": [], |
| "last": "Brundage", |
| "suffix": "" |
| }, |
| { |
| "first": "Jack", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Amanda", |
| "middle": [], |
| "last": "Askell", |
| "suffix": "" |
| }, |
| { |
| "first": "Ariel", |
| "middle": [], |
| "last": "Herbert-Voss", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeff", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Alec", |
| "middle": [], |
| "last": "Radford", |
| "suffix": "" |
| }, |
| { |
| "first": "Jasmine", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1908.09203" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Irene Solaiman, Miles Brundage, Jack Clark, Amanda Askell, Ariel Herbert-Voss, Jeff Wu, Alec Radford, and Jasmine Wang. 2019. Release strategies and the social impacts of language models. arXiv preprint arXiv:1908.09203.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Lstm neural networks for language modeling", |
| "authors": [ |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Sundermeyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Ralf", |
| "middle": [], |
| "last": "Schl\u00fcter", |
| "suffix": "" |
| }, |
| { |
| "first": "Hermann", |
| "middle": [], |
| "last": "Ney", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Thirteenth annual conference of the international speech communication association", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Martin Sundermeyer, Ralf Schl\u00fcter, and Hermann Ney. 2012. Lstm neural networks for language modeling. In Thirteenth annual conference of the international speech communication association.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "The role of familiarity in correcting inaccurate information", |
| "authors": [ |
| { |
| "first": "Briony", |
| "middle": [], |
| "last": "Swire", |
| "suffix": "" |
| }, |
| { |
| "first": "Ullrich", |
| "middle": [ |
| "K", |
| "H" |
| ], |
| "last": "Ecker", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephan", |
| "middle": [], |
| "last": "Lewandowsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Journal of experimental psychology: learning, memory, and cognition", |
| "volume": "43", |
| "issue": "12", |
| "pages": "1948", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Briony Swire, Ullrich KH Ecker, and Stephan Lewandowsky. 2017. The role of familiarity in cor- recting inaccurate information. Journal of experi- mental psychology: learning, memory, and cogni- tion, 43(12):1948.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "\u0141ukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "5998--6008", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in neural information pro- cessing systems, pages 5998-6008.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Show and tell: A neural image caption generator", |
| "authors": [ |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Toshev", |
| "suffix": "" |
| }, |
| { |
| "first": "Samy", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "Dumitru", |
| "middle": [], |
| "last": "Erhan", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the IEEE conference on computer vision and pattern recognition", |
| "volume": "", |
| "issue": "", |
| "pages": "3156--3164", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oriol Vinyals, Alexander Toshev, Samy Bengio, and Dumitru Erhan. 2015. Show and tell: A neural im- age caption generator. In Proceedings of the IEEE conference on computer vision and pattern recogni- tion, pages 3156-3164.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Calculating political bias and fighting partisanship with ai", |
| "authors": [ |
| { |
| "first": "Winston", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Winston Wang. 2019. Calculating political bias and fighting partisanship with ai.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "The effects of credibility, reliance, and exposure on media agendasetting: A path analysis model", |
| "authors": [ |
| { |
| "first": "Wayne", |
| "middle": [], |
| "last": "Wanta", |
| "suffix": "" |
| }, |
| { |
| "first": "Yu-Wei", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "Journalism Quarterly", |
| "volume": "71", |
| "issue": "1", |
| "pages": "90--98", |
| "other_ids": { |
| "DOI": [ |
| "10.1177/107769909407100109" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wayne Wanta and Yu-Wei Hu. 1994. The effects of credibility, reliance, and exposure on media agenda- setting: A path analysis model. Journalism Quar- terly, 71(1):90-98.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Leading news websites in the u.s. 2018, by unique visitors", |
| "authors": [ |
| { |
| "first": "Amy", |
| "middle": [], |
| "last": "Watson", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Amy Watson. 2019. Leading news websites in the u.s. 2018, by unique visitors.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Annotating expressions of opinions and emotions in language. Language resources and evaluation", |
| "authors": [ |
| { |
| "first": "Janyce", |
| "middle": [], |
| "last": "Wiebe", |
| "suffix": "" |
| }, |
| { |
| "first": "Theresa", |
| "middle": [], |
| "last": "Wilson", |
| "suffix": "" |
| }, |
| { |
| "first": "Claire", |
| "middle": [], |
| "last": "Cardie", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "", |
| "volume": "39", |
| "issue": "", |
| "pages": "165--210", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Janyce Wiebe, Theresa Wilson, and Claire Cardie. 2005. Annotating expressions of opinions and emo- tions in language. Language resources and evalua- tion, 39(2-3):165-210.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "The cambridge analytica scandal changed the world-but it didn't change facebook. The Guardian", |
| "authors": [ |
| { |
| "first": "Julia", |
| "middle": [ |
| "Carrie" |
| ], |
| "last": "Wong", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "18", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Julia Carrie Wong. 2019. The cambridge analytica scandal changed the world-but it didn't change facebook. The Guardian, 18.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Shedding (a thousand points of) light on biased language", |
| "authors": [ |
| { |
| "first": "Tae", |
| "middle": [], |
| "last": "Yano", |
| "suffix": "" |
| }, |
| { |
| "first": "Philip", |
| "middle": [], |
| "last": "Resnik", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah A", |
| "middle": [], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the NAACL HLT 2010 Workshop on Creating Speech and Language Data with Amazon's Mechanical Turk", |
| "volume": "", |
| "issue": "", |
| "pages": "152--158", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tae Yano, Philip Resnik, and Noah A Smith. 2010. Shedding (a thousand points of) light on biased lan- guage. In Proceedings of the NAACL HLT 2010 Workshop on Creating Speech and Language Data with Amazon's Mechanical Turk, pages 152-158. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Defending against neural fake news", |
| "authors": [ |
| { |
| "first": "Rowan", |
| "middle": [], |
| "last": "Zellers", |
| "suffix": "" |
| }, |
| { |
| "first": "Ari", |
| "middle": [], |
| "last": "Holtzman", |
| "suffix": "" |
| }, |
| { |
| "first": "Hannah", |
| "middle": [], |
| "last": "Rashkin", |
| "suffix": "" |
| }, |
| { |
| "first": "Yonatan", |
| "middle": [], |
| "last": "Bisk", |
| "suffix": "" |
| }, |
| { |
| "first": "Ali", |
| "middle": [], |
| "last": "Farhadi", |
| "suffix": "" |
| }, |
| { |
| "first": "Franziska", |
| "middle": [], |
| "last": "Roesner", |
| "suffix": "" |
| }, |
| { |
| "first": "Yejin", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "9054--9065", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rowan Zellers, Ari Holtzman, Hannah Rashkin, Yonatan Bisk, Ali Farhadi, Franziska Roesner, and Yejin Choi. 2019. Defending against neural fake news. In Advances in Neural Information Process- ing Systems, pages 9054-9065.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Semantic sentence embeddings for paraphrasing and text summarization", |
| "authors": [ |
| { |
| "first": "Chi", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Shagan", |
| "middle": [], |
| "last": "Sah", |
| "suffix": "" |
| }, |
| { |
| "first": "Thang", |
| "middle": [], |
| "last": "Nguyen", |
| "suffix": "" |
| }, |
| { |
| "first": "Dheeraj", |
| "middle": [], |
| "last": "Peri", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Loui", |
| "suffix": "" |
| }, |
| { |
| "first": "Carl", |
| "middle": [], |
| "last": "Salvaggio", |
| "suffix": "" |
| }, |
| { |
| "first": "Raymond", |
| "middle": [], |
| "last": "Ptucha", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "IEEE Global Conference on Signal and Information Processing (GlobalSIP)", |
| "volume": "", |
| "issue": "", |
| "pages": "705--709", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chi Zhang, Shagan Sah, Thang Nguyen, Dheeraj Peri, Alexander Loui, Carl Salvaggio, and Raymond Ptucha. 2017. Semantic sentence embeddings for paraphrasing and text summarization. In 2017 IEEE Global Conference on Signal and Information Pro- cessing (GlobalSIP), pages 705-709. IEEE.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Character-level convolutional networks for text classification", |
| "authors": [ |
| { |
| "first": "Xiang", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Junbo", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Yann", |
| "middle": [], |
| "last": "Lecun", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "649--657", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiang Zhang, Junbo Zhao, and Yann LeCun. 2015. Character-level convolutional networks for text clas- sification. In Advances in neural information pro- cessing systems, pages 649-657.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "Detecting political bias", |
| "authors": [ |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Abraham", |
| "middle": [], |
| "last": "Ryzhik", |
| "suffix": "" |
| }, |
| { |
| "first": "Nathaniel", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jason Zhao, Abraham Ryzhik, and Nathaniel Lee. Deepnews. ai: Detecting political bias.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "text": "Biased News Generation Procedure.", |
| "type_str": "figure", |
| "num": null |
| }, |
| "TABREF1": { |
| "num": null, |
| "text": "\"All the news\" dataset statistics.", |
| "content": "<table/>", |
| "html": null, |
| "type_str": "table" |
| }, |
| "TABREF3": { |
| "num": null, |
| "text": "Headline: Shaun King is Really Mad About President Trump Domain: Breitbart , Date: June 01, 2017 , Author[s]: Jack Montgomery You know what is really sad? Yet another hate-filled, blame-the-victim tweet from the self-identified Brooklyn blogger and occasional Obama apologist. Wednesday night Shaun King got really fired up on social media, arguing that President Trump \"took out\" Chance the Rapper on Twitter -\"another white, wealthy liberal take, no questions asked.\" At the behest of an enraged social media following he promptly added, in his screen name, \"How dare you christian white men call me racist?\" Trump ditches press pool to play golf Domain: CNN , Date: December 31, 2016 , Author[s]: Eugene Scott (CNN) -President-elect Donald Trump rode a golf cart through the course at his golf course in New Jersey on Saturday before visiting New York City to watch his son Eric Donald Trump give a New Year's Day address. The trip marked the first time Trump has left his Trump Tower residence since he won the November election. Since the election, Trump has visited his golf courses at least once a week. He played golf Friday in New Jersey and Florida and last week in Bedminster, New Jersey. (Bias: -11.01)", |
| "content": "<table><tr><td>(Bias:</td></tr><tr><td>14.21)</td></tr><tr><td>Left Biased</td></tr><tr><td>Headline:</td></tr></table>", |
| "html": null, |
| "type_str": "table" |
| }, |
| "TABREF4": { |
| "num": null, |
| "text": "", |
| "content": "<table/>", |
| "html": null, |
| "type_str": "table" |
| }, |
| "TABREF5": { |
| "num": null, |
| "text": "Ratio of number of participants who marked machine-generated excerpt as human-written. Number of participants is shown in parentheses.", |
| "content": "<table/>", |
| "html": null, |
| "type_str": "table" |
| } |
| } |
| } |
| } |