| { |
| "paper_id": "Q18-1038", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:10:13.252189Z" |
| }, |
| "title": "Probabilistic Verb Selection for Data-to-Text Generation", |
| "authors": [ |
| { |
| "first": "Dell", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "dell.z@ieee.org" |
| }, |
| { |
| "first": "Jiahao", |
| "middle": [], |
| "last": "Yuan", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Shanghai Key Lab of Trustworthy Computing", |
| "institution": "East China Normal University", |
| "location": { |
| "addrLine": "3663 North Zhongshan Road", |
| "postCode": "200062", |
| "settlement": "Shanghai", |
| "country": "China" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Xiaoling", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Shanghai Key Lab of Trustworthy Computing", |
| "institution": "East China Normal University", |
| "location": { |
| "addrLine": "3663 North Zhongshan Road", |
| "postCode": "200062", |
| "settlement": "Shanghai", |
| "country": "China" |
| } |
| }, |
| "email": "2xlwang@sei.ecnu.edu.cn" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Foster", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "\u2020", |
| "middle": [], |
| "last": "Birkbeck", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "In data-to-text Natural Language Generation (NLG) systems, computers need to find the right words to describe phenomena seen in the data. This paper focuses on the problem of choosing appropriate verbs to express the direction and magnitude of a percentage change (e.g., in stock prices). Rather than simply using the same verbs again and again, we present a principled data-driven approach to this problem based on Shannon's noisy-channel model so as to bring variation and naturalness into the generated text. Our experiments on three large-scale real-world news corpora demonstrate that the proposed probabilistic model can be learned to accurately imitate human authors' pattern of usage around verbs, outperforming the state-of-the-art method significantly.", |
| "pdf_parse": { |
| "paper_id": "Q18-1038", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "In data-to-text Natural Language Generation (NLG) systems, computers need to find the right words to describe phenomena seen in the data. This paper focuses on the problem of choosing appropriate verbs to express the direction and magnitude of a percentage change (e.g., in stock prices). Rather than simply using the same verbs again and again, we present a principled data-driven approach to this problem based on Shannon's noisy-channel model so as to bring variation and naturalness into the generated text. Our experiments on three large-scale real-world news corpora demonstrate that the proposed probabilistic model can be learned to accurately imitate human authors' pattern of usage around verbs, outperforming the state-of-the-art method significantly.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Natural Language Generation (NLG) is a fundamental task in Artificial Intelligence (AI) (Russell and Norvig, 2009) . It aims to automatically turn structured data into prose (Reiter, 2007; Belz and Kow, 2009) -the opposite of the better-known field of Natural Language Processing (NLP) that transforms raw text into structured data (e.g., a logical form or a knowledge base) (Jurafsky and Martin, 2009) . Being dubbed \"algorithmic authors\" or \"robot journalists\", NLG systems have attracted a lot of attention in recent years, thanks to the rise of big data (Wright, 2015) .", |
| "cite_spans": [ |
| { |
| "start": 88, |
| "end": 114, |
| "text": "(Russell and Norvig, 2009)", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 174, |
| "end": 188, |
| "text": "(Reiter, 2007;", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 189, |
| "end": 208, |
| "text": "Belz and Kow, 2009)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 375, |
| "end": 402, |
| "text": "(Jurafsky and Martin, 2009)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 558, |
| "end": 572, |
| "text": "(Wright, 2015)", |
| "ref_id": "BIBREF48" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The use of NLG in financial services has been growing very fast. One particularly important NLG problem for summarizing financial or business data is to automatically generate textual descriptions of trends between two data points (such as stock prices). In this paper, we elect to use relative percentages rather than absolute numbers to describe the change from one data point to another. This is because an absolute number might be considered small in one case but large in another, depending on the unit and the context (Krifka, 2007; . For example, 1000 British pounds are worth much more than 1000 Japanese yen; a rise of 100 US dollars in car price might be negligible but the same amount of increase in bike price would be significant. Given two data points (e.g., on a stock chart), the percentage change can always be calculated easily.", |
| "cite_spans": [ |
| { |
| "start": 524, |
| "end": 538, |
| "text": "(Krifka, 2007;", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The challenge is to select the appropriate verb for any percentage change. For example, in newspapers, we often see headlines like \"Apple's stock had jumped 34% this year in anticipation of the next iPhone . . . \" and \"Microsoft's profit climbed 28% with shift to Web-based software . . . \". The journalists writing such news stories use descriptive language such as the verbs like jump and climb to express the direction and magnitude of a percentage change. It is of course possible to simply keep using the same neutral verbs, e.g., increase and decrease for upward and downward changes respectively, again and again, as in most existing datato-text NLG systems. However, the generated text would sound much more natural if computers could use a variety of verbs suitable in the context like human authors do.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Expressions of percentage changes are readily available in many natural language text datasets and can be easily extracted. Therefore computers should be able to learn from such expressions how people decide which verbs to use for what kind of percentage changes.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we address the problem of verb selection for data-to-text NLG through a principled data-driven approach. Specifically, we show how to employ Bayesian reasoning to train a probabilistic model for verb selection based on large-scale realworld news corpora, and demonstrate its advantages over existing verb selection methods.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The rest of this paper is organized as follows. In Section 2, we review the related work in literature. In Section 3, we describe the dataset used for our investigation. In Section 4, we present our probabilistic model for verb selection in detail. In Section 5, we conduct experimental evaluation. In Section 6, we discuss possible extensions to the proposed approach. In Section 7, we draw conclusions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The most successful NLG applications, from the commercial perspective, have been data-to-text NLG systems which generate textual descriptions of databases or datasets (Reiter, 2007; Belz and Kow, 2009) . A typical example is the automatic generation of textual weather forecasts from weather data that has been used by Environment Canada and UK Met Office (Goldberg et al., 1994; Belz, 2008; Sripada et al., 2014 ). The TREND system (Boyd, 1998) focuses on generating descriptions of historical weather patterns. Their method concentrates primarily on the detection of upward and downward trends in the weather data, and uses a limited set of verbs to describe different types of movements. Ramos-Soto et al. (2013) also address the surface realization of weather trend data by creating an \"intermediate language\" for temperature, wind etc. and then using four different ways to verbalize temperatures based on the minimum, maximum and trend in the time frame considered. An empirical corpus-based study of human-written weather forecasts has been conducted in SUMTIME-MOUSAM (Reiter et al., 2005) , and one aspect of their research focused on verb selection in weather forecasts. They built a classifier to predict the choice of verb based on type (speed vs. direction), information content (change or transition from one wind state to another) and near-synonym choice. There is more and more interest in using NLG to enhance accessibility, for example by describing data in the form of graphs etc. to visually impaired people. In such NLG systems, there has also been exploration into the generation of text for trend data which should be automatically adapted to users' reading levels (Moraes et al., 2014) . There exists wide-spread usage of NLG systems on the financial and business data. For example, the SPOTLIGHT system developed at A.C. Nielsen automatically generated readable English text based on the analysis of large amounts of retail sales data. 
For another example, in 2016 Forbes reported that FactSet used NLG to automatically write hundreds of thousands of company descriptions a day. It is not difficult to imagine that different kinds of such data-to-text NLG systems can be utilized by a modern chatbot like Amazon Echo or Microsoft XiaoIce (Shum et al., 2018) to enable users access a variety of online data resources via natural language conversation.", |
| "cite_spans": [ |
| { |
| "start": 167, |
| "end": 181, |
| "text": "(Reiter, 2007;", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 182, |
| "end": 201, |
| "text": "Belz and Kow, 2009)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 356, |
| "end": 379, |
| "text": "(Goldberg et al., 1994;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 380, |
| "end": 391, |
| "text": "Belz, 2008;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 392, |
| "end": 412, |
| "text": "Sripada et al., 2014", |
| "ref_id": "BIBREF44" |
| }, |
| { |
| "start": 433, |
| "end": 445, |
| "text": "(Boyd, 1998)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 691, |
| "end": 715, |
| "text": "Ramos-Soto et al. (2013)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 1076, |
| "end": 1097, |
| "text": "(Reiter et al., 2005)", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 1688, |
| "end": 1709, |
| "text": "(Moraes et al., 2014)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 2263, |
| "end": 2282, |
| "text": "(Shum et al., 2018)", |
| "ref_id": "BIBREF41" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Typically, a complete data-to-text NLG system implements a pipeline which involves both content selection (\"what to say\") and surface realization (\"how to say\"). In recent years, researchers have made much progress in the end-to-end joint optimization of those two aspects: Angeli et al. (2010) treat the generation process as a sequence of local decisions represented by log-linear models; Konstas and Lapata (2013) employ a probabilistic context-free grammar (PCFG) specifying the structure of the event records and complement it with an n-gram language model as well as a dependency model; the most advanced method to date is the LSTM recurrent neural network (RNN) based encoder-aligner-decoder model proposed by Mei et al. (2016) which is able to learn content selection and surface realization together directly from database-text pairs. The verb selection problem that we focus on in this paper belongs to the lexicalization step of content selection, more specifically, sentence planning. Similar to the above mentioned joint optimization methods, our approach to verb selection is also automatic, unsupervised, and domain-independent. It would be straightforward to generalize our proposed model to select other types of words (like adjectives and adverbs), or even textual templates as used by Angeli et al. (2010) , to describe numerical data. Due to its probabilistic nature, our proposed model could be plugged into, or interpolated with, a bigger end-to-end probabilistic model (Konstas and Lapata, 2013) relatively easily, but it is not obvious how this model could fit into a neural architecture (Mei et al., 2016) .", |
| "cite_spans": [ |
| { |
| "start": 274, |
| "end": 294, |
| "text": "Angeli et al. (2010)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 717, |
| "end": 734, |
| "text": "Mei et al. (2016)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 1304, |
| "end": 1324, |
| "text": "Angeli et al. (2010)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 1492, |
| "end": 1518, |
| "text": "(Konstas and Lapata, 2013)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 1612, |
| "end": 1630, |
| "text": "(Mei et al., 2016)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The existing work on lexicalization that is most similar to ours is a corpus based method for verb selection developed by at Thomson Reuters. They analyze the usage patterns of verbs expressing percentage changes in a very large corpus, the Reuters News Archive. For each verb, they calculate the interquartile range (IQR) of its associated percentage changes in the corpus. Given a new percentage change, their method randomly selects a verb from those verbs whose IQRs cover the percentage in question, with equal probabilities. A crowdsourcing based evaluation has demonstrated the superiority of their verb selection method to the random baseline that just chooses verbs completely randomly. It is notable that their method has been incorporated into Thomson Reuters Eikon TM , their commercial datato-text NLG software product for macro-economic indicators and mergers-and-acquisitions deals . We will make experimental comparisons between our proposed approach and theirs in Section 5.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The first (and main) dataset that we have used to investigate the problem of verb selection is BLLIP 1987-89 Wall Street Journal (WSJ) Corpus Release 1 which contains a three-year Wall Street Journal (WSJ) collection of 98,732 stories from ACL/DCI (LDC93T1), approximately 30 million words (Charniak et al., 2000) .", |
| "cite_spans": [ |
| { |
| "start": 290, |
| "end": 313, |
| "text": "(Charniak et al., 2000)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The WSJ Corpus", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We first utilized the Stanford CoreNLP 1 (Manning et al., 2014) toolkit to extract \"relation triples\" from all the documents in the dataset, via its open-domain information extraction (OpenIE) functionality. Then, with the help of part-of-speech (POS) tagging provided by the Python package NLTK 2 (Bird et al., 2009) , we filtered the extracted relation triples and retained only those expressing a percentage change in the following format:", |
| "cite_spans": [ |
| { |
| "start": 298, |
| "end": 317, |
| "text": "(Bird et al., 2009)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The WSJ Corpus", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Google's revenue subject rose verb 22.2% percentage .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The WSJ Corpus", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Here the numerical value of percentage change could be written using either the symbol % or the word percent. Note that all auxiliary verbs (including modal verbs) would have been removed, and lemmatization (Manning et al., 2008; Jurafsky and Martin, 2009) would have been applied to all main verbs so that the different inflectional forms of the same verb would be reduced to their common base form.", |
| "cite_spans": [ |
| { |
| "start": 207, |
| "end": 229, |
| "text": "(Manning et al., 2008;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 230, |
| "end": 256, |
| "text": "Jurafsky and Martin, 2009)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The WSJ Corpus", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "After extracting 57,005 candidate triples for a total of 1,355 verbs, we eliminated rare verbs which occur less than 50 times in the dataset. Furthermore, we manually annotated the direction of each verb as upward or downward, and discarded the verbs like yield which do not indicate the direction of percentage change. The above preprocessing left us with 25 (normalized) verbs of which 11 are upward and 14 are downward. There are 21,766 verb-percentage pairs in total.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The WSJ Corpus", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Furthermore, it is found that most of the percentage changes in this dataset reside within the range [0%, 100%]. Only a tiny portion of percentage changes are beyond that range: 1.35% for upward verbs and 0.10% for downward verbs. Those out-ofrange percentage changes are considered outliers and are excluded from our study in this paper, though the way to relax this constraint will be discussed later in Section 6.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The WSJ Corpus", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We have also validated our model in a widely-used public dataset, the Reuters-21578 text categorization collection 3 . It is a collection of 21,578 documents that appeared on Reuters newswire in 1987. The documents were assembled and indexed with categories, but they were not needed in this paper.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Reuters Corpus", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The same preprocessing as on the WSJ corpus has been applied to this dataset, except that the minimum occurring frequency of verbs was not 50 but 5 times due to the smaller size of this dataset. After manual annotation and filtering, we ended up with 8 verbs including 4 upward ones and 4 downward ones. There are 603 verb-percentage pairs in total.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Reuters Corpus", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Furthermore, to verify the effectiveness of our approach in other languages, we have also made use of the Chinese Gigaword (5th edition) dataset. It is a comprehensive archive of newswire text data that has been acquired from eight distinct sources of Chinese newswire by LDC over a number of years (LDC2011T13), and contains more than 10 million sentences.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Chinese Corpus", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Since we could not find any open-domain information extraction toolkit for \"relation triples\" in Chinese, we resorted to regular expression matching to extract, from Chinese sentences, the expressions of percentage together with their local contexts. A number of regular expression patterns have been utilized to ensure that they could cover all the different ways to write a percentage in Chinese. Then, after POS tagging, we would be able to identify the verb immediately preceding each percentage if it is associated with one.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Chinese Corpus", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "For our application, a big difference between Chinese and English is that the available choices of verbs to express upward or downward percentage changes are pretty limited in Chinese: the variation in fact mostly comes from the adverb used together with the verb. Therefore, when we talk about the problem of Chinese verb selection in this paper, we actually mean the choice of not just verbs but instead adverb+verb combinations, e.g., \u72c2\u5347 (rise crazily) and \u7565\u964d (fall slightly). Our proposed probabilistic model for verb selection, described below in Section 4, can be extended straightforwardly to such generalized Chinese \"verbs\".", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Chinese Corpus", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Similar to the preprocessing of other datasets, rarely occurring verbs with frequency less than 50 would have been filtered out. In the end, we got 18 Chinese verbs of which 14 are upward and 4 are downward. There are 2,829 verb-percentage pairs in total.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Chinese Corpus", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "In this section, we propose to formulate the task of verb selection for data-to-text NLG (see Section 1) as a supervised learning problem (Hastie et al., 2009) and to address it using Shannon's noisy-channel model (Shannon, 1948) .", |
| "cite_spans": [ |
| { |
| "start": 138, |
| "end": 159, |
| "text": "(Hastie et al., 2009)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 214, |
| "end": 229, |
| "text": "(Shannon, 1948)", |
| "ref_id": "BIBREF40" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "4" |
| }, |
| { |
| "text": "For each of the two possible change directions (upward and downward), we need to build a specific model. Without loss of generality, in the subsequent discussion, we focus on selecting the verbs of one particular direction; the way to deal with the other direction is exactly the same. Thus a percentage change is fully specified by its magnitude in one model. The set-up of our supervised learning problem is as follows. Suppose that we have a set of training ex-", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "4" |
| }, |
| { |
| "text": "amples D = {(x 1 , w 1 ), . . . , (x N , w N )},", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "4" |
| }, |
| { |
| "text": "where each example consists of a percentage change x i paired with the verb w i used by the human author to express that percentage change. Such training data could be obtained from a large corpus as described in Section 3. Let X denote the set of possible percentage changes: as mentioned earlier, in this paper we assume that X = [0%, 100%]. Let V denote the set of possible verbs, i.e., the vocabulary. Our task is to learn a predictive function f : X \u2192 V that can map any given percentage change x to an appropriate verb w = f (x).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Apparently, there is inherent uncertainty in the above described process of predicting the choice of verbs for a percentage change. Making use of probabilistic reasoning, the principled approach to handling uncertainties, we argue that the function f should be determined by the posterior probability P (w|x). However, it looks difficult to directly estimate the parameters of such a conditional model, aka discriminative model, for every possible value of x which is a continuous variable. Hence, we turn to the easier alternative way often used in machine learning: to construct a generative model. Rather than directly estimating the conditional probability distribution, we instead estimate the joint probability P (x, w) over (x, w) pairs in the generative model. The joint probability can be decomposed as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P (x, w) = P (w) prior P (x|w) likelihood ,", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Approach", |
| "sec_num": "4" |
| }, |
| { |
| "text": "where P (w) is the prior probability distribution over verbs w, and P (x|w) is the likelihood, i.e., the probability of seeing the percentage change x given that the associated verb is w. The benefit of making the above decomposition is that the parameters of P (w) and P (x|w) can be estimated separately.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Given such a generative model, we can then use the Bayes rule to derive the posterior probability P (w|x) for any new example of x:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P (w|x) = P (w)P (x|w) P (x) ,", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Approach", |
| "sec_num": "4" |
| }, |
| { |
| "text": "where", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "4" |
| }, |
| { |
| "text": "P (x) = w\u2208V P (x, w) = w\u2208V P (w)P (x|w) (3)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "4" |
| }, |
| { |
| "text": "is the model evidence acting as the normalizing constant in the formula.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Intuitively, this generative model could be considered as a noisy-channel (Shannon, 1948) . When we see a percentage change x, we can imagine that it has been generated in two steps (Raviv, 1967) . First, a verb w would be chosen with the prior probability P (w). Second, the verb w would be passed through a communication \"channel\" and be corrupted by the \"noise\" to produce the percentage change x according to the likelihood function (aka the channel model) P (x|w). In other words, the percentage change x that we see is actually the distorted form of its associated verb w.", |
| "cite_spans": [ |
| { |
| "start": 74, |
| "end": 89, |
| "text": "(Shannon, 1948)", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 182, |
| "end": 195, |
| "text": "(Raviv, 1967)", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "4" |
| }, |
| { |
| "text": "An alternative, but equivalent, interpretation is that when a pair (x, w) is passed through the noisychannel, the verb w will be lost and finally only the percentage change x will be seen. The task is to recover the lost w based on the observed x.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Shannon's noisy-channel model is in fact a kind of Bayesian inference. It has been applied to many NLP tasks such as text categorization, spell checking, question answering, speech recognition, and machine translation (Jurafsky and Martin, 2009) . Our application -probabilistic verb selection -is different from them because the observed data are continuous real-valued numbers but not discrete symbols. More importantly, in most of those applications such as text categorization using the Na\u00efve Bayes algorithm (Manning et al., 2008) , the objective is \"decoding\", i.e., to find the single most likely label w * for any given input x from the model", |
| "cite_spans": [ |
| { |
| "start": 218, |
| "end": 245, |
| "text": "(Jurafsky and Martin, 2009)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 513, |
| "end": 535, |
| "text": "(Manning et al., 2008)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "w * = arg max w\u2208V P (w|x) = arg max w\u2208V P (w)P (x|w)/P (x) = arg max w\u2208V P (w)P (x|w) ,", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "Approach", |
| "sec_num": "4" |
| }, |
| { |
| "text": "and therefore the normalizing constant P (x) does not need to be calculated. However, this is actually undesirable for the task of verb selection, because it implies that the a percentage change x would always be expressed by the same \"optimal\" verb w * corresponding to it. To achieve variation and naturalness, we must maintain the diversity of word usage. So the right method to generate a verb w for the given percentage change x is to compute the posterior probability distribution P (w|x) over all the possible verbs in the vocabulary V using Eq. 2and then randomly sample a verb from that distribution. Although this means that the normalizing constant P (x) needs to be calculated each time, the computation is still efficient, as unlike in many other applications the vocabulary size |V| is a quite small number in practice (see Section 3).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "4" |
| }, |
| { |
| "text": "In the following two subsections, we study the two components of our proposed probabilistic model for verb selection, the prior probability distribution and the likelihood function, respectively.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The prior probability distribution P (w) could simply be obtained by maximum likelihood estimation (MLE):", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Prior", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P (w) MLE = N w /N ,", |
| "eq_num": "(5)" |
| } |
| ], |
| "section": "Prior", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "where N w is the number of training examples with the verb w, and N is the total number of training examples. The relationship between a verb's rank and frequency in the WSJ corpus is depicted by the log-log plot Fig. 1 , revealing that the empirical distribution of verbs follows the Zipf's law (Powers, 1998) , which is related to the power law (Adamic, 2000; Newman, 2005) . Specifically, the frequency of the i-th popular verb, f i , is proportional to 1/i s , where s is the exponent characterizing the distribution (shown as the slope of the straight line in the corresponding log-log plot). This implies that in the context of expressing percentage changes, the human choice of verbs is dominated by a few frequently used ones, and many other verbs are only used very occasionally.", |
| "cite_spans": [ |
| { |
| "start": 296, |
| "end": 310, |
| "text": "(Powers, 1998)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 347, |
| "end": 361, |
| "text": "(Adamic, 2000;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 362, |
| "end": 375, |
| "text": "Newman, 2005)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 213, |
| "end": 219, |
| "text": "Fig. 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Prior", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Smoothing: If we would like to intentionally boost the diversity of verb choices, we could mitigate the high skewness of the empirical distribution of verbs by smoothing (Zhai and Lafferty, 2004) . A simple smoothing technique suitable for this purpose is the Jelinek-Mercer smoothing (Jelinek and Mercer, 1980) which uses a linear interpolation between the maximum likelihood estimation of a verb w's prior probability distribution with the uniform distribution over the vocabulary of verbs V, i.e.,", |
| "cite_spans": [ |
| { |
| "start": 170, |
| "end": 195, |
| "text": "(Zhai and Lafferty, 2004)", |
| "ref_id": "BIBREF49" |
| }, |
| { |
| "start": 285, |
| "end": 311, |
| "text": "(Jelinek and Mercer, 1980)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Prior", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P (w) = \u03bbP (w) MLE + (1 \u2212 \u03bb) 1 |V| ,", |
| "eq_num": "(6)" |
| } |
| ], |
| "section": "Prior", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "where P (w) MLE is given by Eq. 5, and the parameter \u03bb \u2208 [0, 1] provides a means to explicitly control the trade-off between accuracy and diversity. The smaller the parameter \u03bb is, the more diverse the generated verbs would be. When \u03bb = 0, the prior probability is completely ignored and the selection of a verb solely depends on how compatible the verb is with the given percentage change. When \u03bb = 1, it backs off to the original model without smoothing. The optimal value of the parameter \u03bb could be tuned on a development set (see Section 5.3).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Prior", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "For each verb w \u2208 V, we analyze the distribution of its associated percentage changes and calculate the following descriptive statistics: mean, standard deviation (std), skewness, kurtosis, median, and interquartile range (IQR). All those descriptive statistics for the WSJ corpus are given in Table 1 . In addition, Fig. 2 shows the box plots of percentage changes for top-10 (most frequent) verbs in the WSJ corpus, where the rectangular box corresponding to each verb represents the span from the first quartile to the third quartile, i.e., the interquartile range (IQR), with the segment inside the box indicating the median and the whiskers outside the box indicating the rest of the distribution (except for the points that are determined to be \"outliers\" using the so-called Tukey box plot method). It can be seen that the choice of verbs often imply the magnitude of percentage change: some verbs (such as soar and plunge) are mostly used to express big changes (large medians), while some verbs (such as advance and ease) are mostly used to express small changes (small medians). Generally speaking, the former is associated with a relatively wide range of percentage changes (large IQRs) while the latter is associated with a relatively narrow range of percentage changes (small IQRs). Moreover, it is interesting to see that for almost all the verbs, the distribution of percentage changes is heavily skewed to the left side (i.e., smaller changes).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 294, |
| "end": 301, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| }, |
| { |
| "start": 317, |
| "end": 323, |
| "text": "Fig. 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Likelihood", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Given a new percentage change x, in order to calculate its probability of being generated from a verb w in the above described generative model, we need to fit the likelihood function, i.e., the probability distribution P (x|w), for each word w \u2208 V, based on the training data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Likelihood", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "One common technique for this purpose is kernel density estimation (KDE) (Hastie et al., 2009) , a nonparametric way to estimate the probability density function as follows: where N w is the number of training examples with the verb w, K(\u2022) is the kernel (a non-negative function that integrates to one and has mean zero), and h > 0 is a smoothing parameter called the bandwidth. Fig. 3 shows the likelihood function P (x|w) fitted by KDE with Gaussian kernels and automatic bandwidth determination using the rule of Scott (2015),", |
| "cite_spans": [ |
| { |
| "start": 73, |
| "end": 94, |
| "text": "(Hastie et al., 2009)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 380, |
| "end": 386, |
| "text": "Fig. 3", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Likelihood", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P (x|w) = 1 N w h Nw i=1 K x \u2212 x i h ,", |
| "eq_num": "(7)" |
| } |
| ], |
| "section": "Likelihood", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "for the most popular upward and downward verbs in the WSJ corpus: rise and fall.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Likelihood", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "It is also possible to fit a parametric model of P (x|w) which would be more efficient than KDE. Since in this paper x is assumed to be a continuous random variable within the range [0%, 100%] (see Section 3), we choose to fit P (x|w) with the Beta distribution which is a continuous distribution supported on the bounded interval [0, 1]:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Likelihood", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "P (x|w) = Beta(\u03b1, \u03b2) = \u0393(\u03b1 + \u03b2) \u0393(\u03b1)\u0393(\u03b2) x \u03b1\u22121 (1 \u2212 x) \u03b2\u22121 . (8)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Likelihood", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Although there exist a number of continuous distributions supported on the bounded interval such as the truncated normal distribution, the Beta distribution is picked here as it has the ability to take a great variety of different shapes using only two parameters \u03b1 and \u03b2. These two parameters can be estimated using the method of moments, or maximum likelihood. For example, using the former, we have", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Likelihood", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "\u03b1 =x x(1\u2212x) v \u2212 1 and \u03b2 = (1 \u2212x) x(1\u2212x) v \u2212 1 ifv <x(1 \u2212x)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Likelihood", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": ", wherex andv are the sample mean and sample variance respectively. Fig. 4 shows the likelihood function P (x|w) fitted by the Beta distribution using SciPy 4 for the most popular upward and downward verbs in the WSJ corpus: rise and fall.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 68, |
| "end": 74, |
| "text": "Fig. 4", |
| "ref_id": "FIGREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Likelihood", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Thomson Reuters: The only published approach that we are aware of to this specific task of verb selection in the context of data-to-text NLG is the method adopted by Thomson Reuters Eikon TM . This baseline method's effectiveness has been verified through crowdsourcing, as we have mentioned before (see Section 2). Furthermore, it is fairly new (published in 2016), therefore should represent the state of the art in this field. Note that their model was not taken off-the-shelf but re-trained on our datasets to ensure a fair comparison with our approach.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Neural Network: Another baseline method that we have tried is a feed-forward artificial neural network with hidden layers, aka, a multi-layer perceptron (Russell and Norvig, 2009; Goodfellow et al., 2016) . It is because neural networks are well-known universal function approximators, and they represent quite a different family of supervised learning algorithms. Unlike our proposed probabilistic approach which is essentially a generative model, the neural network used in our experiments is a discriminative model which takes the percentage change input (represented as a single floating-point number) and then predicts the verb choice directly. Since we would like to have probability estimates for each verb, the softmax function was used for the output layer of neurons, and the network was trained via back-propagation to minimize the cross-entropy loss function. An l 2 regularization term was also added to the loss function that would shrink model parameters to prevent overfitting. The activation function was set to the rectified linear unit (ReLU) (Hahnloser et al., 2000) . The Adam optimization algorithm (Kingma and Ba, 2014) was employed as the solver, with the samples shuffled after each iteration. The initial learning rate was set to 0.001, and the maximum number of iterations (epochs) was set to 1500. For our datasets, a single hidden layer of 100 neurons would be sufficient and adding more neurons or layers could not help. This was found using the development set through a line search from 20 to 500 hidden neurons with step size 20. Note that when applying the trained neural network to select verbs, we should use not argmax but sampling from the predicted probability distribution (given by the softmax function), in the same way as we do in our proposed probabilistic model (see Section 4).", |
| "cite_spans": [ |
| { |
| "start": 153, |
| "end": 179, |
| "text": "(Russell and Norvig, 2009;", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 180, |
| "end": 204, |
| "text": "Goodfellow et al., 2016)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 1062, |
| "end": 1086, |
| "text": "(Hahnloser et al., 2000)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "The Python code for our experiments, along with the datasets of verb-percentage pairs extracted from those three corpora (see Section 3), have been made available to the research community 5 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Code", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "5 https://goo.gl/gkj8Fa", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Code", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "The end users' perception of a verb selection algorithm's quality depends on not only how accurately the chosen verbs reflect the corresponding percentage changes but also how diverse the chosen verbs are, which are two largely orthogonal dimensions for evaluation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automatic Evaluation", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Accuracy: The easiest way to assess the accuracy of an NLG method or system is to compare the texts generated by computers and the texts written by humans for the same input data (Mellish and Dale, 1998; Reiter and Belz, 2009) , using an automatic metric such as BLEU (Papineni et al., 2002) . For our task of verb selection, we decide to use the metric MRR that stands for mean reciprocal rank (Voorhees, 1999; Radev et al., 2002) and can be calculated as follows:", |
| "cite_spans": [ |
| { |
| "start": 179, |
| "end": 203, |
| "text": "(Mellish and Dale, 1998;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 204, |
| "end": 226, |
| "text": "Reiter and Belz, 2009)", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 268, |
| "end": 291, |
| "text": "(Papineni et al., 2002)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 395, |
| "end": 411, |
| "text": "(Voorhees, 1999;", |
| "ref_id": "BIBREF45" |
| }, |
| { |
| "start": 412, |
| "end": 431, |
| "text": "Radev et al., 2002)", |
| "ref_id": "BIBREF31" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automatic Evaluation", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "MRR = 1 |Q| (x i ,w i )\u2208Q 1 rank(w i ) ,", |
| "eq_num": "(9)" |
| } |
| ], |
| "section": "Automatic Evaluation", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "where", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automatic Evaluation", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Q = {(x 1 , w 1 ), . . . , (x M , w M )}", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automatic Evaluation", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "is the set of test examples, and rank(w i ) refers to the rank position of w i -the verb really used by the human author to describe the percentage change x i -in the list of predicted verbs ranked in the descending order of their probabilities of correctness given by the model. The MRR metric is most widely used for the evaluation of automatic question answering which is similar to automatic verb selection in the following sense: they both aim to output just one suitable response (answer or verb) to any given input (question or percentage change).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automatic Evaluation", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Through 5-fold cross-validation (Hastie et al., 2009) , we have got the MRR scores of our proposed model (see Section 4) and the two baseline models (see Section 5.1) which are shown in Table 2 . The models were trained/tested separately on each dataset (see Section 3). In each round of 5-fold crossvalidation, 20% of the data would become the test set; in the remaining 80% of the data, randomly selected 60% would be the training set and the other 20% would be the development set if parameter tuning is needed (otherwise the whole 80% would be used for training).", |
| "cite_spans": [ |
| { |
| "start": 32, |
| "end": 53, |
| "text": "(Hastie et al., 2009)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 186, |
| "end": 193, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Automatic Evaluation", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "The parameter \u03bb of our model controls the strength of smoothing over the prior probability (see Section 4.1) and thus dictates the trade-off between accuracy and diversity. If we focus on the accuracy only and ignore the diversity, the optimal value of \u03bb should just be 1 (i.e., no smoothing). In order to strike a healthy balance between accuracy and diversity, we carried out a line search for the value of \u03bb from 0 to 1 with step size 0.05 using the development set. It turned out that the smoothing effect upon diversity would only become noticeable when \u03bb \u2264 0.1, so we further conducted a line search from 0 to 0.1 with step size 0.01, and found that using \u03bb = 0.05 consistently yield a good performance on different corpora. Actually, this phenomenon should not be very surprising, given the Zipfian distribution of verbs which is highly skewed (see Fig. 1 ). Our observation in the experiments still indicate that smoothing with a none-zero \u03bb worked better than setting \u03bb = 0. That is to say, it would not be wise to go to extremes to ignore the prior entirely which would unnecessarily harm the accuracy. An alternative smoothing solution for mitigating the severe skewness of the empirical prior that we also considered is to make the smoothed prior probability proportional to the logarithm of the raw prior probability, but we did not take that route as (i) we could not find a good principled interpretation for such a trick and; (ii) using a small \u03bb value like 0.05 seemed to work sufficiently well. It will be shown later that sampling verbs from the posterior probability distribution rather than just using the one with the maximum probability would help to alleviate the problem of prior skewness and thus prevent verb selection from being dominated by the most popular verbs.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 856, |
| "end": 862, |
| "text": "Fig. 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Automatic Evaluation", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "It can be observed from the experimental results that smoothing (see Section 4.1) does reduce the accuracy of verb selection. The MRR scores with \u03bb = 0.05 are lower than those with \u03bb = 1. Nevertheless, as we shall soon see, strong smoothing is crucially important for achieving a good level of diversity. Furthermore, there seemed to be little performance difference between the usage of the KDE technique or the Beta distribution to fit the likelihood function in our approach. This suggests that the latter is preferable because it is as effective as the former but much more efficient. Therefore, in the remaining part of this paper, we shall focus on this specific version of our model (with \u03bb = 0.05, Beta) even though it may not be the most accurate.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automatic Evaluation", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "The MRR scores achieved by our approach are around 0.4 -0.8 which implies that, on average, the first or the second verb selected by our approach would be the \"correct\" verb used by human authors.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automatic Evaluation", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Across all the three corpora, our proposed probabilistic model, whether it is smoothed or not, whether it uses the KDE technique or the Beta distribution, outperforms the Thomson Reuters baseline by a large margin in terms of MRR. According to the Wilcoxon signed-rank test (Wilcoxon, 1945; Kerby, 2014) , the performance improvements brought by our approach over the Thomson Reuters baseline are statistically significant with the (two-sided) p-value 0.0001 on the two English corpora and = 0.0027 on the Chinese corpus.", |
| "cite_spans": [ |
| { |
| "start": 274, |
| "end": 290, |
| "text": "(Wilcoxon, 1945;", |
| "ref_id": "BIBREF47" |
| }, |
| { |
| "start": 291, |
| "end": 303, |
| "text": "Kerby, 2014)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automatic Evaluation", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "With respect to the Neural Network baseline, on all the three corpora, its accuracy is slightly better than that of our smoothed model (\u03bb = 0.05) though it still could not beat our original unsmoothed model (\u03bb = 1). The major problem with the Neural Network baseline is that, similar to the probabilistic model without smoothing, its verb choices would concentrate on the most frequent ones and thus have very poor diversity. A prominent advantage of our proposed probabilistic model, in comparison with discriminative learning algorithms such as the Neural Network baseline, is that we are able to explicitly control the trade-off between accuracy and diversity by adjusting the strength of smoothing.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automatic Evaluation", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "It is worth emphasizing that the accuracy of a verb selection method only reflects its ability to imitate how writers (journalists) use verbs, but this is not necessarily the same as how readers interpret the verbs. Usually the ultimate goal of an NLG system is to successfully communicate information to readers. Previous research in NLG and psychology suggests that there is wide variation in how different people interpret verbs and words in general, which is probably much larger in the general population than amongst journalists. Specifically, the MRR metric would probably underestimate the effectiveness of a verb selection method, since a verb different from the one really used by the writer is not necessarily a less appropriate choice for the corresponding percentage change from the reader's perspective.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automatic Evaluation", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Diversity: Other than the accuracy of reproducing the verb choices made by human authors, verb selection methods could also be automatically evaluated in terms of diversity.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automatic Evaluation", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Following Kingrani et al. (2015), we borrow the diversity measures from ecology (Magurran, 1988) to quantitatively analyze the diversity of verb choices: each specific verb is considered as a particular species. When measuring the biological diversity of a habitant, it is important to consider not only the number of distinct species present but also the relative abundance of each species. In the literature of ecology, the former is called richness and the latter is called evenness. Here we utilize the well-known Inverse Simpson Index aka Simpson's Reciprocal Index (Simpson, 1949) which takes both richness and evenness into account:", |
| "cite_spans": [ |
| { |
| "start": 80, |
| "end": 96, |
| "text": "(Magurran, 1988)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 565, |
| "end": 586, |
| "text": "Index (Simpson, 1949)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automatic Evaluation", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "D = R i=1 p 2 i \u22121 , where", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automatic Evaluation", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "R is the total number of distinct species (i.e., richness), and p i is the the proportion of the individuals belonging to the i-th species relative to the entire population. The evenness is given by the value of diversity normalized to the range between 0 and 1, so it can be calculated as D/R. Table 3 shows the diversity scores of verb choices made by our approach and the Thomson Reuters baseline for 450 randomly sampled percentage changes (see Section 5.4). Overall, in terms of diversity, our approach would lose to Thomson Reuters. The Neural Network baseline is omitted here because its diversity scores were very low.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 295, |
| "end": 302, |
| "text": "Table 3", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Automatic Evaluation", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Discussion: Figs. 5 and 6 show the confusion matrices of our approach (\u03bb = 0.05, Beta) on the WSJ corpus as (row-normalized) heatmaps: in the former we choose the verb with the highest posterior probability (argmax) while in the latter we sample the verb from the posterior probability distribution (see Section 4). The argmax way would be dominated by a few verbs (e.g., \"rise\", \"soar\", \"fall\", and \"plummet\"). In contrast, random sampling would lead to a much wider variety of verbs. The experimental results of all verb selection methods reported in this paper are generated by the sampling strategy, if not indicated otherwise. It can be seen from Fig. 6 that the verbs \"soar\" and \"plunge\" are the easiest to be predicted. Generally speaking, the prediction of verbs is relatively more accurate for bigger percentage changes, whether upwards or downwards. This is probably because there are fewer verbs available to describe such radical percentage changes (see Fig. 2 ) and thus the model faces less uncertainty. Most misclassification (confusion) happens when a verb is incorrectly predicted to be the most frequent one (\"rise\" or \"fall\").", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 652, |
| "end": 658, |
| "text": "Fig. 6", |
| "ref_id": null |
| }, |
| { |
| "start": 966, |
| "end": 972, |
| "text": "Fig. 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Automatic Evaluation", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "The two aspects, accuracy and diversity, are both important for the task of verb selection. Although ried out for either accuracy or diversity alone, there is no obvious way to assess the overall effectiveness of a verb selection method using machines only. The ultimate judgment on the quality of verb selection would have to come from human assessors (Mellish and Dale, 1998; Reiter and Belz, 2009; . To manually compare our approach (the version with \u03bb = 0.05, Beta) with a baseline method (Thomson Reuters or Neural Network), we conduct a questionnaire survey with 450 multiple-choice questions. In each question, a respondent would see a pair of generated sentences describing the same percentage change with the verbs selected by two different methods respectively and need to judge which one sounds better than the other (or it is hard to tell). For example, a respondent could be shown the following pair of generated sentences:", |
| "cite_spans": [ |
| { |
| "start": 92, |
| "end": 100, |
| "text": "Although", |
| "ref_id": null |
| }, |
| { |
| "start": 353, |
| "end": 377, |
| "text": "(Mellish and Dale, 1998;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 378, |
| "end": 400, |
| "text": "Reiter and Belz, 2009;", |
| "ref_id": "BIBREF35" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Human Evaluation", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "(1) Net profit declines 3% (2) Net profit plummets 3%", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Human Evaluation", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "and then they were supposed to choose one of the three following options as their answer:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Human Evaluation", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "[a] Sentence (1) sounds better.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Human Evaluation", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "[b] Sentence (2) sounds better.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Human Evaluation", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "[c] They are equally good. The respondents would be blinded to whether the first verb or the second verb was provided by our proposed method, as their appearing order would have been randomized in advance. The questionnaire survey system withheld the information about the source of each verb until the answers from all respondents had been collected, and then it would count how many times the verb selected by our proposed method was deemed better than (>), worse than (<), or as good as (\u2248) the verb selected by the baseline method.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Human Evaluation", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "For each corpus, we produced 150 different questions, of which half were about upward verbs and half were about downward verbs. As we have explained above, each question compares a pair of generated sentences describing the same percentage change with different verbs. The sentence generation process is the same as that used by . The subjects were randomly picked from the most popular ones in the corpus (e.g., \"gross domestic product\"), and the percentage changes (as the objects) were randomly sampled from the corpus as well. Each of the two verb selection methods, in comparison, would provide one verb (as the predicate) for describing that specific percentage change. Note that in this sentence generation process, a pair of sentences would be retained only if the verbs selected by the two methods were different, as it would be meaningless to compare two identical sentences.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Human Evaluation", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "A total of 15 college-educated people participated in the questionnaire survey. They are all bilingual, i.e., native or fluent speakers of both English and Chinese. Each person was given 30 questions: 10 questions (including 5 upward and 5 downward ones) from each corpus. We (the authors of this paper) were excluded from participating in the questionnaire survey to avoid any conscious or unconscious bias.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Human Evaluation", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "The results of human evaluation are shown in Table 4. Altogether, respondents prefer the verb selected by our approach 234/450=52% of times, as opposed to 186/450=41% for the Thomson Reuters baseline; respondents prefer the verb selected by our approach 290/450=64% of times, as opposed to 144/450=32% for the Neural Network baseline. According to the sign test (Wackerly et al., 2007) , our approach works significantly better than the two baseline methods, Thomson Reuters and Neural Network: overall the (two-sided) p-values are less than 0.05. Discussion: Our approach exhibits more superiority over the Thomson Reuters baseline on the English datasets than on the Chinese dataset. Since the Chinese dataset is bigger than the Reuters dataset, though smaller than the WSJ dataset, the performance difference is not caused by corpus size but due to language characteristics. Remember that for Chinese we are actually predicting adverb+verb combinations (see Section 3.3). Retrospective manual inspection of the experimental results suggests that users seem to have relatively higher expectations of diversity for Chinese adverbs than for English verbs.", |
| "cite_spans": [ |
| { |
| "start": 362, |
| "end": 385, |
| "text": "(Wackerly et al., 2007)", |
| "ref_id": "BIBREF46" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Human Evaluation", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "Robustness: It is still possible, though very unlikely, for the proposed probabilistic model to generate atypical uses of a verb. A simple measure to avoid such situations is to reject the sampled verb w * if the posterior probability P (w * |x) < \u03c4 where \u03c4 is a predefined threshold, e.g., 5%, and then resample w * until P (w * |x) \u2265 \u03c4 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extensions", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Unlimited Range: If the magnitude of a percentage change is allowed to go beyond 100%, we would no longer be able to use the Beta distribution to fit the likelihood function P (x|w) as it is supported on a bounded interval. However, it should be straightforward to use a flexible probability distribution supported on the semi-infinite interval [0, +\u221e], such as the Gamma distribution.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extensions", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Subject: The context, in particular the subject of the percentage change, has not been taken into account by the presented models. As illustrated by the two example sentences below, the same verb (\"surge\") could be used for quite different percentage changes (\"181%\" vs \"8%\") depending on the subject (\"wheat price\" vs \"inflation\").", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extensions", |
| "sec_num": "6" |
| }, |
| { |
| "text": "\u2022 \"According to World Bank figures, wheat prices have surged up by 181 percent in the past three years to February 2008.\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extensions", |
| "sec_num": "6" |
| }, |
| { |
| "text": "\u2022 \"While inflation has surged to almost 8% in 2008, it is projected by the Commission to fall in 2009.\" Furthermore, the significance of a percentage change often depends on the domain, and consequently, so does the most appropriate verb to describe a percentage change. For example, a 10% increase in stock price is interesting, while a 10% increase in body temperature is life-threatening. It is, of course, possible to incorporate the subject information into our probabilistic model by extending Eq. 2 to P (w|x, s) = P (w, s)P (x|w, s)/P (x, s) where s is the subject word in the triple. On one hand, this should make the model more effective, for the reasons explained above. On the other hand, this would require a lot more data for reliable estimation of the model parameters, which is one of the reasons why we leave it for future work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extensions", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Language Modeling: Thanks to its probabilistic nature, our proposed model for verb selection could be seamlessly plugged into an n-gram statistical language model (Jurafsky and Martin, 2009) , e.g., for the MSR Sentence Completion Challenge 6 . This might be able to reduce the language model's perplexity, as the probability of subject, verb, percentage triples could be calculated more precisely.", |
| "cite_spans": [ |
| { |
| "start": 163, |
| "end": 190, |
| "text": "(Jurafsky and Martin, 2009)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extensions", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Hierarchical Modeling: The choice of verb to describe a particular percentage change could be affected by the style of the author, the topic of the document, and other contextual factors. To take those dimensions into account and build a finer probabilistic model for verb selection, we could embrace Bayesian hierarchical modeling (Gelman et al., 2013; Kruschke, 2014) which, for example, could let each author's model borrow the \"statistical power\" from other authors'.", |
| "cite_spans": [ |
| { |
| "start": 332, |
| "end": 353, |
| "text": "(Gelman et al., 2013;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 354, |
| "end": 369, |
| "text": "Kruschke, 2014)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extensions", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Psychology: There exist a lot of studies in psychology on how people interpret probabilities and risks (Reagan et al., 1989; Berry et al., 2004) . They could provide useful insights for further enhancing our verb selection method.", |
| "cite_spans": [ |
| { |
| "start": 103, |
| "end": 124, |
| "text": "(Reagan et al., 1989;", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 125, |
| "end": 144, |
| "text": "Berry et al., 2004)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extensions", |
| "sec_num": "6" |
| }, |
| { |
| "text": "The major research contribution of this paper is a probabilistic model that can select appropriate verbs to express percentage changes with different directions and magnitudes. This model is not relying on hard-wired heuristics, but learned from training examples (in the form of verb-percentage pairs) that are extracted from large-scale real-world news corpora. The choices of verbs made by the proposed model are found to match our intuitions about how different verbs are collocated with percentage changes of different sizes. The real challenge here is to strike the right balance between accuracy and diversity, which can be realized via smoothing. Our experiments have confirmed that the proposed model can capture human authors' pattern of usage around verbs better than the existing method currently employed by Thomson Reuters Eikon TM . We hope that this probabilistic model for verb selection could help data-to-text NLG systems achieve greater variation and naturalness.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "7" |
| }, |
| { |
| "text": "https://stanfordnlp.github.io/CoreNLP/ 2 http://www.nltk.org/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://goo.gl/NrOfu", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://www.scipy.org/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://goo.gl/yyKBYa", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "The research is partly funded by the National Key R&D Program of China (ID: 2017YFC0803700) and the NSFC grant (No. 61532021). The Titan X Pascal GPU used for our experiments was kindly donated by the NVIDIA Corporation. Prof Xuanjing Huang (Fudan) has helped with the datasets.We thank the anonymous reviewers and the action editor for their constructive and helpful comments. We also gratefully acknowledge the support of Geek.AI for this work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Zipf, power-laws, and Pareto -A ranking tutorial", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Lada", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Adamic", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lada A Adamic. 2000. Zipf, power-laws, and Pareto - A ranking tutorial. Technical report, HP Labs.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "A simple domain-independent probabilistic approach to generation", |
| "authors": [ |
| { |
| "first": "Gabor", |
| "middle": [], |
| "last": "Angeli", |
| "suffix": "" |
| }, |
| { |
| "first": "Percy", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 2010 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "502--512", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gabor Angeli, Percy Liang, and Dan Klein. 2010. A simple domain-independent probabilistic approach to generation. In Proceedings of the 2010 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 502-512.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "System building cost vs. output quality in data-to-text generation", |
| "authors": [ |
| { |
| "first": "Anja", |
| "middle": [], |
| "last": "Belz", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Kow", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the 12th European Workshop on Natural Language Generation (ENLG)", |
| "volume": "", |
| "issue": "", |
| "pages": "16--24", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anja Belz and Eric Kow. 2009. System building cost vs. output quality in data-to-text generation. In Pro- ceedings of the 12th European Workshop on Natural Language Generation (ENLG), pages 16-24.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Automatic generation of weather forecast texts using comprehensive probabilistic generationspace models", |
| "authors": [ |
| { |
| "first": "Anja", |
| "middle": [], |
| "last": "Belz", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Natural Language Engineering (NLE)", |
| "volume": "14", |
| "issue": "04", |
| "pages": "431--455", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anja Belz. 2008. Automatic generation of weather fore- cast texts using comprehensive probabilistic generation- space models. Natural Language Engineering (NLE), 14(04):431-455.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Over the counter medicines and the need for immediate action: A further evaluation of European Commission recommended wordings for communicating risk", |
| "authors": [ |
| { |
| "first": "Dianne", |
| "middle": [], |
| "last": "Berry", |
| "suffix": "" |
| }, |
| { |
| "first": "Theo", |
| "middle": [], |
| "last": "Raynor", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Knapp", |
| "suffix": "" |
| }, |
| { |
| "first": "Elisabetta", |
| "middle": [], |
| "last": "Bersellini", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Patient Education and Counseling", |
| "volume": "53", |
| "issue": "2", |
| "pages": "129--134", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dianne Berry, Theo Raynor, Peter Knapp, and Elisabetta Bersellini. 2004. Over the counter medicines and the need for immediate action: A further evaluation of Eu- ropean Commission recommended wordings for com- municating risk. Patient Education and Counseling, 53(2):129-134.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Natural Language Processing with Python: Analyzing Text with the Natural Language Toolkit", |
| "authors": [ |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Bird", |
| "suffix": "" |
| }, |
| { |
| "first": "Ewan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| }, |
| { |
| "first": "Edward", |
| "middle": [], |
| "last": "Loper", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Steven Bird, Ewan Klein, and Edward Loper. 2009. Natu- ral Language Processing with Python: Analyzing Text with the Natural Language Toolkit. O'Reilly Media.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "TREND: A system for generating intelligent descriptions of time series data", |
| "authors": [ |
| { |
| "first": "Sarah", |
| "middle": [], |
| "last": "Boyd", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "Proceedings of the 2nd IEEE International Conference on Intelligent Processing Systems (ICIPS)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sarah Boyd. 1998. TREND: A system for generating intelligent descriptions of time series data. In Pro- ceedings of the 2nd IEEE International Conference on Intelligent Processing Systems (ICIPS).", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "BLLIP 1987-89 WSJ Corpus Release 1 LDC2000T43. Web Download. Philadelphia: Linguistic Data Consortium", |
| "authors": [ |
| { |
| "first": "Eugene", |
| "middle": [], |
| "last": "Charniak", |
| "suffix": "" |
| }, |
| { |
| "first": "Don", |
| "middle": [], |
| "last": "Blaheta", |
| "suffix": "" |
| }, |
| { |
| "first": "Niyu", |
| "middle": [], |
| "last": "Ge", |
| "suffix": "" |
| }, |
| { |
| "first": "Keith", |
| "middle": [], |
| "last": "Hall", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Hale", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eugene Charniak, Don Blaheta, Niyu Ge, Keith Hall, John Hale, and Mark Johnson. 2000. BLLIP 1987-89 WSJ Corpus Release 1 LDC2000T43. Web Download. Philadelphia: Linguistic Data Consortium.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Bayesian Data Analysis. CRC", |
| "authors": [ |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Gelman", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Carlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Hal", |
| "middle": [], |
| "last": "Stern", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Dunson", |
| "suffix": "" |
| }, |
| { |
| "first": "Aki", |
| "middle": [], |
| "last": "Vehtari", |
| "suffix": "" |
| }, |
| { |
| "first": "Donald", |
| "middle": [], |
| "last": "Rubin", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrew Gelman, John Carlin, Hal Stern, David Dunson, Aki Vehtari, and Donald Rubin. 2013. Bayesian Data Analysis. CRC, 3rd edition.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Using natural-language processing to produce weather forecasts", |
| "authors": [ |
| { |
| "first": "Eli", |
| "middle": [], |
| "last": "Goldberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Norbert", |
| "middle": [], |
| "last": "Driedger", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [ |
| "I" |
| ], |
| "last": "Kittredge", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "IEEE Expert", |
| "volume": "9", |
| "issue": "2", |
| "pages": "45--53", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eli Goldberg, Norbert Driedger, and Richard I. Kittredge. 1994. Using natural-language processing to produce weather forecasts. IEEE Expert, 9(2):45-53.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Deep Learning", |
| "authors": [ |
| { |
| "first": "Ian", |
| "middle": [], |
| "last": "Goodfellow", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "Aaron", |
| "middle": [], |
| "last": "Courville", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ian Goodfellow, Yoshua Bengio, and Aaron Courville. 2016. Deep Learning. MIT Press.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Digital selection and analogue amplification coexist in a cortex-inspired silicon circuit", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [ |
| "R" |
| ], |
| "last": "Richard", |
| "suffix": "" |
| }, |
| { |
| "first": "Rahul", |
| "middle": [], |
| "last": "Hahnloser", |
| "suffix": "" |
| }, |
| { |
| "first": "Misha", |
| "middle": [ |
| "A" |
| ], |
| "last": "Sarpeshkar", |
| "suffix": "" |
| }, |
| { |
| "first": "Rodney", |
| "middle": [ |
| "J" |
| ], |
| "last": "Mahowald", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [ |
| "Sebastian" |
| ], |
| "last": "Douglas", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Seung", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "Nature", |
| "volume": "405", |
| "issue": "6789", |
| "pages": "947--951", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Richard H.R. Hahnloser, Rahul Sarpeshkar, Misha A. Ma- howald, Rodney J. Douglas, and H. Sebastian Seung. 2000. Digital selection and analogue amplification coexist in a cortex-inspired silicon circuit. Nature, 405(6789):947-951.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "The Elements of Statistical Learning: Data Mining, Inference, and Prediction", |
| "authors": [ |
| { |
| "first": "Trevor", |
| "middle": [], |
| "last": "Hastie", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Tibshirani", |
| "suffix": "" |
| }, |
| { |
| "first": "Jerome", |
| "middle": [], |
| "last": "Friedman", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Trevor Hastie, Robert Tibshirani, and Jerome Friedman. 2009. The Elements of Statistical Learning: Data Min- ing, Inference, and Prediction. Springer, 2nd edition.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Interpolated Estimation of Markov Source Parameters from Sparse Data", |
| "authors": [ |
| { |
| "first": "Frederick", |
| "middle": [], |
| "last": "Jelinek", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Mercer", |
| "suffix": "" |
| } |
| ], |
| "year": 1980, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "381--402", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Frederick Jelinek and Robert Mercer, 1980. Interpolated Estimation of Markov Source Parameters from Sparse Data, pages 381-402. North-Holland Publishing.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Speech and Language Processing: An Introduction to Natural Language Processing, Computational Linguistics and Speech Recognition", |
| "authors": [ |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [ |
| "H" |
| ], |
| "last": "Martin", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Daniel Jurafsky and James H. Martin. 2009. Speech and Language Processing: An Introduction to Natural Language Processing, Computational Linguistics and Speech Recognition. Prentice Hall, 2nd edition.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "The simple difference formula: An approach to teaching nonparametric correlation", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Dave", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Kerby", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Comprehensive Psychology", |
| "volume": "3", |
| "issue": "1", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dave S Kerby. 2014. The simple difference formula: An approach to teaching nonparametric correlation. Com- prehensive Psychology, 3(1).", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Adam: A method for stochastic optimization", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Diederik", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1412.6980" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik P. Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Diversity analysis of web search results", |
| "authors": [ |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Suneel Kumar Kingrani", |
| "suffix": "" |
| }, |
| { |
| "first": "Dell", |
| "middle": [], |
| "last": "Levene", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "In Proceedings of the Annual International ACM Web Science conference", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Suneel Kumar Kingrani, Mark Levene, and Dell Zhang. 2015. Diversity analysis of web search results. In Pro- ceedings of the Annual International ACM Web Science conference (WebSci).", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "A global model for concept-to-text generation", |
| "authors": [ |
| { |
| "first": "Ioannis", |
| "middle": [], |
| "last": "Konstas", |
| "suffix": "" |
| }, |
| { |
| "first": "Mirella", |
| "middle": [], |
| "last": "Lapata", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Journal of Artificial Intelligence Research (JAIR)", |
| "volume": "48", |
| "issue": "", |
| "pages": "305--346", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ioannis Konstas and Mirella Lapata. 2013. A global model for concept-to-text generation. Journal of Artifi- cial Intelligence Research (JAIR), 48:305-346.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Approximate interpretations of number words: A case for strategic communication", |
| "authors": [ |
| { |
| "first": "Manfred", |
| "middle": [], |
| "last": "Krifka", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Cognitive Foundations of Interpretation", |
| "volume": "", |
| "issue": "", |
| "pages": "111--126", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Manfred Krifka. 2007. Approximate interpretations of number words: A case for strategic communication. In Cognitive Foundations of Interpretation, pages 111- 126.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Doing Bayesian Data Analysis: A Tutorial with", |
| "authors": [ |
| { |
| "first": "Jags", |
| "middle": [], |
| "last": "John K Kruschke ; R", |
| "suffix": "" |
| }, |
| { |
| "first": "Stan", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John K Kruschke. 2014. Doing Bayesian Data Analysis: A Tutorial with R, JAGS, and Stan. Academic Press, 2nd edition.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Ecological Diversity and Its Measurement", |
| "authors": [ |
| { |
| "first": "Anne", |
| "middle": [ |
| "E" |
| ], |
| "last": "Magurran", |
| "suffix": "" |
| } |
| ], |
| "year": 1988, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anne E. Magurran. 1988. Ecological Diversity and Its Measurement. Princeton University Press.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Introduction to Information Retrieval", |
| "authors": [ |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "Prabhakar", |
| "middle": [], |
| "last": "Raghavan", |
| "suffix": "" |
| }, |
| { |
| "first": "Hinrich", |
| "middle": [], |
| "last": "Sch\u00fctze", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christopher D. Manning, Prabhakar Raghavan, and Hin- rich Sch\u00fctze. 2008. Introduction to Information Re- trieval. Cambridge University Press.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "The Stanford CoreNLP natural language processing toolkit", |
| "authors": [ |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "Mihai", |
| "middle": [], |
| "last": "Surdeanu", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Bauer", |
| "suffix": "" |
| }, |
| { |
| "first": "Jenny", |
| "middle": [ |
| "Rose" |
| ], |
| "last": "Finkel", |
| "suffix": "" |
| }, |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Bethard", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Mc-Closky", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics (ACL), System Demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "55--60", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christopher D. Manning, Mihai Surdeanu, John Bauer, Jenny Rose Finkel, Steven Bethard, and David Mc- Closky. 2014. The Stanford CoreNLP natural language processing toolkit. In Proceedings of the 52nd Annual Meeting of the Association for Computational Linguis- tics (ACL), System Demonstrations, pages 55-60.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "What to talk about and how? Selective generation using LSTMs with coarse-to-fine alignment", |
| "authors": [ |
| { |
| "first": "Hongyuan", |
| "middle": [], |
| "last": "Mei", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Bansal", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [ |
| "R" |
| ], |
| "last": "Walter", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (NAACL-HLT)", |
| "volume": "", |
| "issue": "", |
| "pages": "720--730", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hongyuan Mei, Mohit Bansal, and Matthew R. Walter. 2016. What to talk about and how? Selective gen- eration using LSTMs with coarse-to-fine alignment. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (NAACL- HLT), pages 720-730.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Evaluation in the context of natural language generation", |
| "authors": [ |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Mellish", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Dale", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "Computer Speech & Language", |
| "volume": "12", |
| "issue": "4", |
| "pages": "349--373", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chris Mellish and Robert Dale. 1998. Evaluation in the context of natural language generation. Computer Speech & Language, 12(4):349-373.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Adapting graph summaries to the users' reading levels", |
| "authors": [ |
| { |
| "first": "Priscilla", |
| "middle": [], |
| "last": "Moraes", |
| "suffix": "" |
| }, |
| { |
| "first": "Kathleen", |
| "middle": [], |
| "last": "Mccoy", |
| "suffix": "" |
| }, |
| { |
| "first": "Sandra", |
| "middle": [], |
| "last": "Carberry", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 8th International Natural Language Generation Conference (INLG)", |
| "volume": "", |
| "issue": "", |
| "pages": "64--73", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Priscilla Moraes, Kathleen McCoy, and Sandra Carberry. 2014. Adapting graph summaries to the users' reading levels. In Proceedings of the 8th International Natural Language Generation Conference (INLG), pages 64- 73.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Power laws, Pareto distributions and Zipf's law. Contemporary Physics", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [ |
| "J" |
| ], |
| "last": "Mark", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Newman", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "", |
| "volume": "46", |
| "issue": "", |
| "pages": "323--351", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mark E. J. Newman. 2005. Power laws, Pareto distribu- tions and Zipf's law. Contemporary Physics, 46(5):323- 351.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "BLEU: A method for automatic evaluation of machine translation", |
| "authors": [ |
| { |
| "first": "Kishore", |
| "middle": [], |
| "last": "Papineni", |
| "suffix": "" |
| }, |
| { |
| "first": "Salim", |
| "middle": [], |
| "last": "Roukos", |
| "suffix": "" |
| }, |
| { |
| "first": "Todd", |
| "middle": [], |
| "last": "Ward", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei-Jing", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "311--318", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. BLEU: A method for automatic evalu- ation of machine translation. In Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics (ACL), pages 311-318.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Interacting with financial data using natural language", |
| "authors": [ |
| { |
| "first": "Vassilis", |
| "middle": [], |
| "last": "Plachouras", |
| "suffix": "" |
| }, |
| { |
| "first": "Charese", |
| "middle": [], |
| "last": "Smiley", |
| "suffix": "" |
| }, |
| { |
| "first": "Hiroko", |
| "middle": [], |
| "last": "Bretz", |
| "suffix": "" |
| }, |
| { |
| "first": "Ola", |
| "middle": [], |
| "last": "Taylor", |
| "suffix": "" |
| }, |
| { |
| "first": "Jochen", |
| "middle": [ |
| "L" |
| ], |
| "last": "Leidner", |
| "suffix": "" |
| }, |
| { |
| "first": "Dezhao", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| }, |
| { |
| "first": "Frank", |
| "middle": [], |
| "last": "Schilder", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 39th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR)", |
| "volume": "", |
| "issue": "", |
| "pages": "1121--1124", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vassilis Plachouras, Charese Smiley, Hiroko Bretz, Ola Taylor, Jochen L. Leidner, Dezhao Song, and Frank Schilder. 2016. Interacting with financial data us- ing natural language. In Proceedings of the 39th In- ternational ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR), pages 1121-1124.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Applications and explanations of Zipf's law", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [ |
| "W" |
| ], |
| "last": "David", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Powers", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "Proceedings of the Joint Conferences on New Methods in Language Processing and Computational Natural Language Learning (NeMLaP/CoNLL)", |
| "volume": "", |
| "issue": "", |
| "pages": "151--160", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David MW Powers. 1998. Applications and explanations of Zipf's law. In Proceedings of the Joint Conferences on New Methods in Language Processing and Computa- tional Natural Language Learning (NeMLaP/CoNLL), pages 151-160.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Evaluating web-based question answering systems", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Dragomir", |
| "suffix": "" |
| }, |
| { |
| "first": "Hong", |
| "middle": [], |
| "last": "Radev", |
| "suffix": "" |
| }, |
| { |
| "first": "Harris", |
| "middle": [], |
| "last": "Qi", |
| "suffix": "" |
| }, |
| { |
| "first": "Weiguo", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Fan", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the 3rd International Conference on Language Resources and Evaluation (LREC)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dragomir R. Radev, Hong Qi, Harris Wu, and Weiguo Fan. 2002. Evaluating web-based question answer- ing systems. In Proceedings of the 3rd International Conference on Language Resources and Evaluation (LREC).", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Automatic generation of textual short-term weather forecasts on real prediction data", |
| "authors": [ |
| { |
| "first": "Alejandro", |
| "middle": [], |
| "last": "Ramos-Soto", |
| "suffix": "" |
| }, |
| { |
| "first": "Alberto", |
| "middle": [], |
| "last": "Bugar\u00edn", |
| "suffix": "" |
| }, |
| { |
| "first": "Sen\u00e9n", |
| "middle": [], |
| "last": "Barro", |
| "suffix": "" |
| }, |
| { |
| "first": "Juan", |
| "middle": [], |
| "last": "Taboada", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 10th International Conference on Flexible Query Answering Systems (FQAS)", |
| "volume": "", |
| "issue": "", |
| "pages": "269--280", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alejandro Ramos-Soto, Alberto Bugar\u00edn, Sen\u00e9n Barro, and Juan Taboada. 2013. Automatic generation of textual short-term weather forecasts on real prediction data. In Proceedings of the 10th International Confer- ence on Flexible Query Answering Systems (FQAS), pages 269-280.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Decision making in Markov chains applied to the problem of pattern recognition", |
| "authors": [ |
| { |
| "first": "Josef", |
| "middle": [], |
| "last": "Raviv", |
| "suffix": "" |
| } |
| ], |
| "year": 1967, |
| "venue": "IEEE Transactions on Information Theory", |
| "volume": "13", |
| "issue": "4", |
| "pages": "536--551", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Josef Raviv. 1967. Decision making in Markov chains applied to the problem of pattern recognition. IEEE Transactions on Information Theory, 13(4):536-551.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Quantitative meanings of verbal probability expressions", |
| "authors": [ |
| { |
| "first": "Robert", |
| "middle": [ |
| "T" |
| ], |
| "last": "Reagan", |
| "suffix": "" |
| }, |
| { |
| "first": "Frederick", |
| "middle": [], |
| "last": "Mosteller", |
| "suffix": "" |
| }, |
| { |
| "first": "Cleo", |
| "middle": [], |
| "last": "Youtz", |
| "suffix": "" |
| } |
| ], |
| "year": 1989, |
| "venue": "Journal of Applied Psychology", |
| "volume": "74", |
| "issue": "3", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Robert T. Reagan, Frederick Mosteller, and Cleo Youtz. 1989. Quantitative meanings of verbal probability ex- pressions. Journal of Applied Psychology, 74(3):433.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "An investigation into the validity of some metrics for automatically evaluating natural language generation systems", |
| "authors": [ |
| { |
| "first": "Ehud", |
| "middle": [], |
| "last": "Reiter", |
| "suffix": "" |
| }, |
| { |
| "first": "Anja", |
| "middle": [], |
| "last": "Belz", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Computational Linguistics", |
| "volume": "35", |
| "issue": "4", |
| "pages": "529--558", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ehud Reiter and Anja Belz. 2009. An investigation into the validity of some metrics for automatically evalu- ating natural language generation systems. Computa- tional Linguistics, 35(4):529-558.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Choosing words in computergenerated weather forecasts", |
| "authors": [ |
| { |
| "first": "Ehud", |
| "middle": [], |
| "last": "Reiter", |
| "suffix": "" |
| }, |
| { |
| "first": "Somayajulu", |
| "middle": [], |
| "last": "Sripada", |
| "suffix": "" |
| }, |
| { |
| "first": "Jim", |
| "middle": [], |
| "last": "Hunter", |
| "suffix": "" |
| }, |
| { |
| "first": "Jin", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Ian", |
| "middle": [], |
| "last": "Davy", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Artificial Intelligence", |
| "volume": "167", |
| "issue": "1-2", |
| "pages": "137--169", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ehud Reiter, Somayajulu Sripada, Jim Hunter, Jin Yu, and Ian Davy. 2005. Choosing words in computer- generated weather forecasts. Artificial Intelligence, 167(1-2):137-169.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "An architecture for data-to-text systems", |
| "authors": [ |
| { |
| "first": "Ehud", |
| "middle": [], |
| "last": "Reiter", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the 11th European Workshop on Natural Language Generation (ENLG)", |
| "volume": "", |
| "issue": "", |
| "pages": "97--104", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ehud Reiter. 2007. An architecture for data-to-text sys- tems. In Proceedings of the 11th European Workshop on Natural Language Generation (ENLG), pages 97- 104.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Artificial Intelligence: A Modern Approach", |
| "authors": [ |
| { |
| "first": "Stuart", |
| "middle": [], |
| "last": "Russell", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Norvig", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stuart Russell and Peter Norvig. 2009. Artificial Intelli- gence: A Modern Approach. Prentice Hall, 3rd edition.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Multivariate Density Estimation: Theory, Practice, and Visualization", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [], |
| "last": "David", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Scott", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David W Scott. 2015. Multivariate Density Estimation: Theory, Practice, and Visualization. John Wiley & Sons.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "A mathematical theory of communication", |
| "authors": [ |
| { |
| "first": "Claude", |
| "middle": [ |
| "E" |
| ], |
| "last": "Shannon", |
| "suffix": "" |
| } |
| ], |
| "year": 1948, |
| "venue": "Bell System Technical Journal", |
| "volume": "27", |
| "issue": "", |
| "pages": "623--656", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Claude E. Shannon. 1948. A mathematical theory of com- munication. Bell System Technical Journal, 27:623- 656.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "From Eliza to XiaoIce: Challenges and opportunities with social chatbots", |
| "authors": [ |
| { |
| "first": "Heung-Yeung", |
| "middle": [], |
| "last": "Shum", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodong", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Di", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1801.01957" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Heung-Yeung Shum, Xiaodong He, and Di Li. 2018. From Eliza to XiaoIce: Challenges and opportunities with social chatbots. arXiv preprint arXiv:1801.01957.", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "Measurement of diversity", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Edward", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Simpson", |
| "suffix": "" |
| } |
| ], |
| "year": 1949, |
| "venue": "Nature", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Edward H Simpson. 1949. Measurement of diversity. Nature.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "When to plummet and when to soar: Corpus based verb selection for natural language generation", |
| "authors": [ |
| { |
| "first": "Charese", |
| "middle": [], |
| "last": "Smiley", |
| "suffix": "" |
| }, |
| { |
| "first": "Vassilis", |
| "middle": [], |
| "last": "Plachouras", |
| "suffix": "" |
| }, |
| { |
| "first": "Frank", |
| "middle": [], |
| "last": "Schilder", |
| "suffix": "" |
| }, |
| { |
| "first": "Hiroko", |
| "middle": [], |
| "last": "Bretz", |
| "suffix": "" |
| }, |
| { |
| "first": "Jochen", |
| "middle": [ |
| "L" |
| ], |
| "last": "Leidner", |
| "suffix": "" |
| }, |
| { |
| "first": "Dezhao", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 9th International Natural Language Generation Conference (INLG)", |
| "volume": "", |
| "issue": "", |
| "pages": "36--39", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Charese Smiley, Vassilis Plachouras, Frank Schilder, Hi- roko Bretz, Jochen L. Leidner, and Dezhao Song. 2016. When to plummet and when to soar: Corpus based verb selection for natural language generation. In Pro- ceedings of the 9th International Natural Language Generation Conference (INLG), pages 36-39.", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "A case study: NLG meeting weather industry demand for quality and quantity of textual weather forecasts", |
| "authors": [ |
| { |
| "first": "Somayajulu", |
| "middle": [], |
| "last": "Sripada", |
| "suffix": "" |
| }, |
| { |
| "first": "Neil", |
| "middle": [], |
| "last": "Burnett", |
| "suffix": "" |
| }, |
| { |
| "first": "Ross", |
| "middle": [], |
| "last": "Turner", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Mastin", |
| "suffix": "" |
| }, |
| { |
| "first": "Dave", |
| "middle": [], |
| "last": "Evans", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 8th International Natural Language Generation Conference (INLG)", |
| "volume": "", |
| "issue": "", |
| "pages": "1--5", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Somayajulu Sripada, Neil Burnett, Ross Turner, John Mastin, and Dave Evans. 2014. A case study: NLG meeting weather industry demand for quality and quan- tity of textual weather forecasts. In Proceedings of the 8th International Natural Language Generation Con- ference (INLG), pages 1-5.", |
| "links": null |
| }, |
| "BIBREF45": { |
| "ref_id": "b45", |
| "title": "The TREC-8 question answering track report", |
| "authors": [ |
| { |
| "first": "Ellen", |
| "middle": [ |
| "M" |
| ], |
| "last": "Voorhees", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "Proceedings of the 8th Text REtrieval Conference (TREC)", |
| "volume": "", |
| "issue": "", |
| "pages": "77--82", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ellen M. Voorhees. 1999. The TREC-8 question an- swering track report. In Proceedings of the 8th Text REtrieval Conference (TREC), pages 77-82.", |
| "links": null |
| }, |
| "BIBREF46": { |
| "ref_id": "b46", |
| "title": "Mathematical Statistics with Applications", |
| "authors": [ |
| { |
| "first": "Dennis", |
| "middle": [], |
| "last": "Wackerly", |
| "suffix": "" |
| }, |
| { |
| "first": "William", |
| "middle": [], |
| "last": "Mendenhall", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Scheaffer", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dennis Wackerly, William Mendenhall, and Richard Scheaffer. 2007. Mathematical Statistics with Applica- tions. Nelson Education.", |
| "links": null |
| }, |
| "BIBREF47": { |
| "ref_id": "b47", |
| "title": "Individual comparisons by ranking methods", |
| "authors": [ |
| { |
| "first": "Frank", |
| "middle": [], |
| "last": "Wilcoxon", |
| "suffix": "" |
| } |
| ], |
| "year": 1945, |
| "venue": "Biometrics Bulletin", |
| "volume": "1", |
| "issue": "6", |
| "pages": "80--83", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Frank Wilcoxon. 1945. Individual comparisons by rank- ing methods. Biometrics Bulletin, 1(6):80-83.", |
| "links": null |
| }, |
| "BIBREF48": { |
| "ref_id": "b48", |
| "title": "Algorithmic authors. Communications of the ACM (CACM)", |
| "authors": [ |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Wright", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "58", |
| "issue": "", |
| "pages": "12--14", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alex Wright. 2015. Algorithmic authors. Communica- tions of the ACM (CACM), 58(11):12-14.", |
| "links": null |
| }, |
| "BIBREF49": { |
| "ref_id": "b49", |
| "title": "A study of smoothing methods for language models applied to information retrieval", |
| "authors": [ |
| { |
| "first": "Chengxiang", |
| "middle": [], |
| "last": "Zhai", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Lafferty", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "ACM Transactions on Information Systems (TOIS)", |
| "volume": "22", |
| "issue": "2", |
| "pages": "179--214", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chengxiang Zhai and John Lafferty. 2004. A study of smoothing methods for language models applied to in- formation retrieval. ACM Transactions on Information Systems (TOIS), 22(2):179-214.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "type_str": "figure", |
| "text": "The empirical distribution of verbs P (w) MLE follows the Zipf's law, in the WSJ corpus.", |
| "num": null |
| }, |
| "FIGREF1": { |
| "uris": null, |
| "type_str": "figure", |
| "text": "The box plots of percentage changes (in %) for the top-10 verbs, in the WSJ corpus.", |
| "num": null |
| }, |
| "FIGREF3": { |
| "uris": null, |
| "type_str": "figure", |
| "text": "The likelihood function P (x|w) fitted by kernel density estimation (KDE). (a) the verb rise (b) the verb fall", |
| "num": null |
| }, |
| "FIGREF4": { |
| "uris": null, |
| "type_str": "figure", |
| "text": "The likelihood function P (x|w) fitted by the Beta distribution.", |
| "num": null |
| }, |
| "TABREF1": { |
| "content": "<table/>", |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "text": "" |
| }, |
| "TABREF3": { |
| "content": "<table/>", |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "text": "" |
| }, |
| "TABREF5": { |
| "content": "<table><tr><td>522</td></tr></table>", |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "text": "The diversity of verb selection measured by the Inverse Simpson Index." |
| }, |
| "TABREF6": { |
| "content": "<table/>", |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "text": "The results of human evaluation, where the p-values are given by the sign test (two-sided)." |
| } |
| } |
| } |
| } |