| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:39:05.851281Z" |
| }, |
| "title": "Script knowledge constrains ellipses in fragments -Evidence from production data and language modeling", |
| "authors": [ |
| { |
| "first": "Robin", |
| "middle": [], |
| "last": "Lemke", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Saarland University", |
| "location": { |
| "addrLine": "SFB 1102" |
| } |
| }, |
| "email": "robin.lemke@uni-saarland.de" |
| }, |
| { |
| "first": "Lisa", |
| "middle": [], |
| "last": "Sch\u00e4fer", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Saarland University", |
| "location": { |
| "addrLine": "SFB 1102" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Heiner", |
| "middle": [], |
| "last": "Drenhaus", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Saarland University", |
| "location": { |
| "addrLine": "SFB 1102" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Ingo", |
| "middle": [], |
| "last": "Reich", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Saarland University", |
| "location": { |
| "addrLine": "SFB 1102" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "We investigate the effect of script-based (Schank and Abelson 1977) extralinguistic context on the omission of words in fragments. Our data elicited with a production task show that predictable words are more often omitted than unpredictable ones, as predicted by the Uniform Information Density (UID) hypothesis (Levy and Jaeger, 2007). We take into account effects of linguistic and extralinguistic context on predictability and propose a method for estimating the surprisal of words in presence of ellipsis. Our study extends previous evidence for UID in two ways: First, we show that not only local linguistic context, but also extralinguistic context determines the likelihood of omissions. Second, we find UID effects on the omission of content words.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "We investigate the effect of script-based (Schank and Abelson 1977) extralinguistic context on the omission of words in fragments. Our data elicited with a production task show that predictable words are more often omitted than unpredictable ones, as predicted by the Uniform Information Density (UID) hypothesis (Levy and Jaeger, 2007). We take into account effects of linguistic and extralinguistic context on predictability and propose a method for estimating the surprisal of words in presence of ellipsis. Our study extends previous evidence for UID in two ways: First, we show that not only local linguistic context, but also extralinguistic context determines the likelihood of omissions. Second, we find UID effects on the omission of content words.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "In order to communicate a message, speakers can choose between a full sentence (1a) and nonsentential utterances, or fragments (Morgan, 1973) (1b) . Fragments can convey the same meaning as the corresponding sentence, but lack words that are obligatory in the sentence, like a finite verb. We investigate why people omit particular words in fragments and hypothesize that the choice between omitting and realizing a word is driven by the Uniform Information Density (UID) hypothesis (Levy and Jaeger, 2007) , which has been applied to other omissions, like that of relative pronouns (Levy and Jaeger, 2007) and complementizers (Jaeger, 2010) .", |
| "cite_spans": [ |
| { |
| "start": 127, |
| "end": 146, |
| "text": "(Morgan, 1973) (1b)", |
| "ref_id": null |
| }, |
| { |
| "start": 483, |
| "end": 506, |
| "text": "(Levy and Jaeger, 2007)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 583, |
| "end": 606, |
| "text": "(Levy and Jaeger, 2007)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 627, |
| "end": 641, |
| "text": "(Jaeger, 2010)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": null |
| }, |
| { |
| "text": "(1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": null |
| }, |
| { |
| "text": "Ann and Bill are sharing a pizza. She asks: a. Would you like another slice of pizza? b. Another slice?", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": null |
| }, |
| { |
| "text": "Uniform Information Density UID states that information is best distributed uniformly across the utterance. Following Shannon (1949) , the information, or surprisal (Hale, 2001 ), of a word w i is defined as the negative logarithm of its likelihood to appear in context (2).", |
| "cite_spans": [ |
| { |
| "start": 118, |
| "end": 132, |
| "text": "Shannon (1949)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 165, |
| "end": 176, |
| "text": "(Hale, 2001", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": null |
| }, |
| { |
| "text": "(2) S(w i ) = \u2212 log 2 p (w i | context)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": null |
| }, |
| { |
| "text": "Surprisal indexes processing effort (Hale, 2001; Levy, 2008) , and a uniform distribution makes the most efficient use of the hearer's limited cognitive resources. Previous research has shown that the optional omission of function words reflects optimization with respect to UID (e.g. Levy and Jaeger, 2007; Jaeger, 2010) . Optimization consists in two strategies that contribute to a uniform distribution of information: First, omitting uninformative words avoids inefficient local surprisal minima. Second, words that reduce the surprisal of very informative, i.e. unpredictable, following words are more likely to be inserted. If this reasoning also applies to content words like pizza in (2), UID can explain why speakers sometimes use a (specific) fragment rather than a sentence: The fragment is preferred over the sentence if it results from omitting predictable words that are obligatory in the corresponding full sentence.", |
| "cite_spans": [ |
| { |
| "start": 36, |
| "end": 48, |
| "text": "(Hale, 2001;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 49, |
| "end": 60, |
| "text": "Levy, 2008)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 285, |
| "end": 307, |
| "text": "Levy and Jaeger, 2007;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 308, |
| "end": 321, |
| "text": "Jaeger, 2010)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": null |
| }, |
| { |
| "text": "Investigating whether omissions are subject to UID requires (i) a set of linguistic data containing the relevant omissions and (ii) surprisal estimates for both the omitted and realized words in this data set. Given these surprisal estimates, logistic regressions can show whether information-theoretic predictors like surprisal affect the likelihood of a word's omission.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Materials and method", |
| "sec_num": null |
| }, |
| { |
| "text": "Although the term context in (2) in principle comprises both linguistic and extralinguistic context (Levy, 2008) , most of the previous information-theoretic studies on omissions (like the ones cited above) estimated the surprisal of words from corpora with n-gram language models. Such models take only (part of) the linguistic context of the target word into account. How-ever, fragments often occur discourse-initially, so that predictability depends on extralinguistic context that cannot be retrieved from text corpora. Therefore we collected a data set of utterances for tightly controlled script knowledge-based contexts (Schank and Abelson, 1977) with a production task. This data set allows to quantify the effect of both extralinguistic and linguistic context.", |
| "cite_spans": [ |
| { |
| "start": 100, |
| "end": 112, |
| "text": "(Levy, 2008)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 628, |
| "end": 654, |
| "text": "(Schank and Abelson, 1977)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Materials and method", |
| "sec_num": null |
| }, |
| { |
| "text": "Subjects read a story like (3) (original materials in German) and produced the utterance that they considered most likely in that context. Since scripts prime upcoming events (see e.g. Delogu et al., 2018) , they should raise expectations about what will be said in a script-based situation. For instance, in 3, a request to pour the pasta into the pot or to give the speaker the pasta is probable. 3Annika and Jenny want to cook pasta. Annika has put a pot with water on the stove. Then she has turned the stove on. After a few minutes, the water has started to boil. Now Annika says to Jenny:", |
| "cite_spans": [ |
| { |
| "start": 185, |
| "end": 205, |
| "text": "Delogu et al., 2018)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Materials and method", |
| "sec_num": null |
| }, |
| { |
| "text": "In order to use empirically motivated script knowledge representations as stimuli, we based our materials on event chains extracted from DeScript (Wanzare et al., 2016), a crowd-sourced corpus of script knowledge that contains about 100 descriptions of the stereotypical time-course of everyday activities, such as cooking pasta. Following Manshadi et al. 2008, we defined an event as the finite verb and its nominal complement, e.g. put pot in (3). After dependency-parsing the corpus (Stanford parser, Klein and Manning (2003) ) we extracted these event representations from it. We estimated the likelihood of an event given the previous one with bigram language models trained on the manually preprocessed data for each script with the SRILM toolkit (Stolcke, 2002) .", |
| "cite_spans": [ |
| { |
| "start": 504, |
| "end": 528, |
| "text": "Klein and Manning (2003)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 753, |
| "end": 768, |
| "text": "(Stolcke, 2002)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Materials and method", |
| "sec_num": null |
| }, |
| { |
| "text": "We then extracted sequences of three events that were most likely to follow each other and used these event chains to construct our materials. The first sentence in each item introduces the script (cooking pasta), and the next three ones elaborate the event chain (put pot, turn on stove, boil water). For each of 24 items, we collected responses from 100 participants recruited on the crowdsourcing platform Clickworker.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Materials and method", |
| "sec_num": null |
| }, |
| { |
| "text": "As there was a high degree of variation both between scripts and between subjects in the data collected with the production task, we preprocessed the data by manually resolving pronouns and ellipses, lemmatizing the remaining words and finally pooling synonyms to a single lemma. Because we are interested in content words, we removed all function words and adverbials. Removing function words is necessary because e.g. articles and prepositions cannot be freely omitted in standard German (Lemke, 2017; Reich, 2017) and adaptation to UID occurs only \"within the bounds defined by grammar\" (Jaeger, 2010, 25) . Prepositions and distinctive case morphology were annotated on the noun (see (4) for an example), as these features can be important cues towards the meaning intended by the speaker. Adverbials were removed because they can remain implicit in regular sentences and therefore are not involved in the generation of fragments (even though it might be interesting to investigate whether this is subject to UID as well). For the utterance in (4a), preprocessing yields the abstract representation in (4b).", |
| "cite_spans": [ |
| { |
| "start": 490, |
| "end": 503, |
| "text": "(Lemke, 2017;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 504, |
| "end": 516, |
| "text": "Reich, 2017)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 590, |
| "end": 608, |
| "text": "(Jaeger, 2010, 25)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Production data preprocessing", |
| "sec_num": null |
| }, |
| { |
| "text": "( Pour the pasta into the pot! b. pour pasta in.pot", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Production data preprocessing", |
| "sec_num": null |
| }, |
| { |
| "text": "Investigating the effect of surprisal on omission requires surprisal estimates for both realized and omitted words, therefore we reconstructed all ellipses in the original data. We added those expressions that are minimally required in a full sentence, that is, missing verbs and/or their arguments. This ensures that the outcome of the independent variable, surprisal, is not affected by the dependent variable, omission. The data set for analysis comprises a total of 2.409 sentences consisting in 6.816 primitive expressions (\"words\" in what follows), 1.052 (15.43%) of these words had been omitted in the original data set.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Production data preprocessing", |
| "sec_num": null |
| }, |
| { |
| "text": "We investigate potential effects of three measures of surprisal: (i) unigram surprisal, (ii) context-dependent surprisal that takes into account preceding linguistic material within the utterance and (iii) surprisal reduction, i.e. how much inserting a word before a target word reduces its surprisal.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Surprisal estimation", |
| "sec_num": null |
| }, |
| { |
| "text": "We estimate the unigram surprisal of each word in the preprocessed data with unigram language models with Good-Turing discount on the preprocessed data that we trained using the SRILM toolkit (Stolcke, 2002) . We trained an individual language model on the data for each script sepa-rately, because this allows to interpret surprisal as conditioned on the script-based situation, i.e. on the extralinguistic context (5):", |
| "cite_spans": [ |
| { |
| "start": 192, |
| "end": 207, |
| "text": "(Stolcke, 2002)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Surprisal estimation", |
| "sec_num": null |
| }, |
| { |
| "text": "(5) S(w i ) = \u2212 log 2 p(w i | context extraling. ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Surprisal estimation", |
| "sec_num": null |
| }, |
| { |
| "text": "We use a novel method based on Hale (2001) to estimate context-dependent surprisal, that considers preceding words in addition to extralinguistic context. The default method to quantify effects of linguistic context on surprisal are bigram or higher order n-gram models. However, training n-gram models on elliptical data brings along a circularity issue observed by Levy and Jaeger (2007, 852) : If predictable words are omitted more often than unpredictable ones, their corpus frequency is not proportional to their predictability. This problem could be addressed by ellipsis resolution, but training n-gram models on the enriched data set is also not realistic. A trigram model trained on the enriched data set estimates the surprisal of pot in a fragment pour pot, where pasta has been omitted from p(pot | pour pasta). Crucially, this is psychologically implausible, because pasta is not included in the actual linguistic context. Therefore we estimate context-dependent surprisal (and surprisal reduction, see below) with a method based on the approach by Hale (2001) . Hale (2001) derives surprisal from the work done by the human parser, that consists in rejecting all parses that are compatible with the input before but not after processing a word. The larger the total probability mass of the rejected parses is, the more informative is a word. This approach requires to know the likelihood of each parse, i.e. each complete structure, which in our case is equivalent to its relative frequency in the enriched data set. Hale (2001) calculates the surprisal of a word w i as the log ratio between the prefix probability \u03b1, i.e. the total probability mass of the parses compatible with an input, before and after processing w i :", |
| "cite_spans": [ |
| { |
| "start": 31, |
| "end": 42, |
| "text": "Hale (2001)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 367, |
| "end": 394, |
| "text": "Levy and Jaeger (2007, 852)", |
| "ref_id": null |
| }, |
| { |
| "start": 1062, |
| "end": 1073, |
| "text": "Hale (2001)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 1076, |
| "end": 1087, |
| "text": "Hale (2001)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 1531, |
| "end": 1542, |
| "text": "Hale (2001)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Surprisal estimation", |
| "sec_num": null |
| }, |
| { |
| "text": "(6) S(w i ) = log \u03b1 i-1 \u03b1 i", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Surprisal estimation", |
| "sec_num": null |
| }, |
| { |
| "text": "We modify Hale's approach by allowing for arbitrarily many omissions before and after each word in the input string in order to account for the possibility of ellipses when calculating a word's effect on the set of maintained parses and consequently on \u03b1 i . For instance, processing pour in the fragment pour pot rules out all parses that do not contain pour. Processing pot now excludes all parses that do not contain pot somewhere after pour, independently of whether there is a word like pasta between pour and pot. Surprisal is calculated as (6) based on the prefix probabilities before and after these processing steps. Our approach circumvents the circularity issue because it relies on nonelliptical representations. It is also psychologically realistic because it quantifies the work done by the parser incrementally. Finally, we calculate surprisal reduction, i.e. how much inserting w i reduces the surprisal of w i-1 , for all non-final words. For this purpose, we calculate the ratio between the prefix probability at w i+1 if w i has been realized and the prefix probability at w i+1 if w i has been omitted. In case of the example, how much the surprisal of pot is reduced by inserting pasta is calculated as (7). 7S reduction(pot,pasta) = \u03b1 put ... pot \u03b1 put ... pasta ... pot", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Surprisal estimation", |
| "sec_num": null |
| }, |
| { |
| "text": "We analyzed the data with mixed effects logistic regressions (lme4, Bates et al. (2015) ) that predict the omission of a word in the enriched data set from the surprisal measures. We first conducted separate analyses of unigram and contextdependent surprisal on the complete data set and then an analysis that considers both unigram surprisal and surprisal reduction for non-final words. In principle it would have been desirable to include all three surprisal measures as predictors in a single regression analysis, but, as table 1 shows, in particular context-dependent surprisal is highly correlated with the other two measures.", |
| "cite_spans": [ |
| { |
| "start": 68, |
| "end": 87, |
| "text": "Bates et al. (2015)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": null |
| }, |
| { |
| "text": "The models in the analyses of unigram surprisal 1 and context-dependent surprisal 2 contained by-script random intercepts and slopes for surprisal and by-subject random intercepts. In both analyses there are significant main effects of the respective predictor, that confirm our hypothesis that predictable words are more likely to be omitted. The effect for unigram surprisal (\u03c7 2 = 7.39, p < .01) is stronger than that of context-dependent surprisal (\u03c7 2 = 4.86, p < .05). The analysis that includes surprisal reduction and unigram surprisal 3 was conducted on a subset of the data that contained those non-final words that were not followed by an ellipsis (55.51% of the total data). The final model has random intercepts for subjects and scripts and contains significant main effects of both predictors. The effect of unigram surprisal (\u03c7 2 = 10.39, p < .01) replicates the analysis of the full data set, and the effect of surprisal reduction (\u03c7 2 = 27.03, p < .001) shows that words that reduce the surprisal of the following word more strongly are more likely to be realized. There is no significant interaction between both predictors (\u03c7 2 = 0.01, p > .9). Discussion Our study confirms the predictions of UID on omissions in fragments: Predictable words are more often omitted in fragments, and words that reduce the surprisal of following ones are more often realized. This extends previous evidence for UID in two ways: First, we find UID effects on the omission of content words. Second, we show that not only local linguistic context, but also extralinguistic context determines the likelihood of omissions. UID however seems not to be the only factor in determining whether fragments are used, as the ratio of fragments varies even between scripts with a similar mean surprisal.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": null |
| }, |
| { |
| "text": "Our study also shows that event probabilities estimated from a corpus of script knowledge provide a reasonable model of extralinguistic context, to which subjects adapt their lingustic behavior. We also propose a method for estimating by-word surprisal in partially elliptical data in a psychologically realistic way. In our study this required a data set that we collected specfically for this purpose and a large amount of manual preprocessing. Future work could show inhowfar our results can be replicated on larger and less constrained data sets when preprocessing steps like reference and ellipsis resolution as well as the standardization of the production data are automatized.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": null |
| }, |
| { |
| "text": "Ellipsis \u223c UnigramS + (1+UnigramS|Script) + (1|Subj) 2 Ellipsis \u223c ContextS + (1+ContextS|Script) + (1|Subj)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Fitting Linear Mixed-Effects Models Using lme4", |
| "authors": [ |
| { |
| "first": "Douglas", |
| "middle": [], |
| "last": "Bates", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "M\u00e4chler", |
| "suffix": "" |
| }, |
| { |
| "first": "Ben", |
| "middle": [], |
| "last": "Bolker", |
| "suffix": "" |
| }, |
| { |
| "first": "Steve", |
| "middle": [], |
| "last": "Walker", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Journal of Statistical Software", |
| "volume": "67", |
| "issue": "1", |
| "pages": "1--48", |
| "other_ids": { |
| "DOI": [ |
| "10.18637/jss.v067.i01" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Douglas Bates, Martin M\u00e4chler, Ben Bolker, and Steve Walker. 2015. Fitting Linear Mixed-Effects Mod- els Using lme4. Journal of Statistical Software, 67(1):1-48.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "On the predictability of event bound", |
| "authors": [ |
| { |
| "first": "Francesca", |
| "middle": [], |
| "last": "Delogu", |
| "suffix": "" |
| }, |
| { |
| "first": "Heiner", |
| "middle": [], |
| "last": "Drenhaus", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [ |
| "W" |
| ], |
| "last": "Crocker", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.3758/s13421-017-0766-4" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Francesca Delogu, Heiner Drenhaus, and Matthew W. Crocker. 2018. On the predictability of event bound-", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "+ (1|Subj) aries in discourse: An ERP investigation", |
| "authors": [ |
| { |
| "first": "\u223c", |
| "middle": [], |
| "last": "Ellipsis", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Unigrams * Sreduction", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "Memory & Cognition", |
| "volume": "46", |
| "issue": "1|Script", |
| "pages": "315--325", |
| "other_ids": { |
| "DOI": [ |
| "10.3758/s13421-017-0766-4" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ellipsis \u223c UnigramS * SReduction (1|Script) + (1|Subj) aries in discourse: An ERP investigation. Memory & Cognition, 46(2):315-325.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "A probabilistic Earley parser as a psycholinguistic model", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Hale", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Proceedings of NAACL", |
| "volume": "2", |
| "issue": "", |
| "pages": "159--166", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/1073336.1073357" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "John Hale. 2001. A probabilistic Earley parser as a psycholinguistic model. In Proceedings of NAACL (Vol. 2), pages 159-166.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Redundancy and reduction: Speakers manage syntactic information density", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "Florian", |
| "middle": [], |
| "last": "Jaeger", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Cognitive Psychology", |
| "volume": "61", |
| "issue": "1", |
| "pages": "23--62", |
| "other_ids": { |
| "DOI": [ |
| "10.1016/j.cogpsych.2010.02.002" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "T. Florian Jaeger. 2010. Redundancy and reduc- tion: Speakers manage syntactic information den- sity. Cognitive Psychology, 61(1):23-62.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Accurate Unlexicalized Parsing", |
| "authors": [ |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of the 41st Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "423--430", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dan Klein and Christopher D. Manning. 2003. Accu- rate Unlexicalized Parsing. In Proceedings of the 41st Meeting of the Association for Computational Linguistics, pages 423-430.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Sentential or not? -An experimental study on the syntax of fragments", |
| "authors": [ |
| { |
| "first": "Robin", |
| "middle": [], |
| "last": "Lemke", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of Linguistic Evidence", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Robin Lemke. 2017. Sentential or not? -An exper- imental study on the syntax of fragments. In Pro- ceedings of Linguistic Evidence 2016. University of T\u00fcbingen, online publication system.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Expectation-based syntactic comprehension", |
| "authors": [ |
| { |
| "first": "Roger", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Cognition", |
| "volume": "106", |
| "issue": "3", |
| "pages": "1126--1177", |
| "other_ids": { |
| "DOI": [ |
| "10.1016/j.cognition.2007.05.006" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Roger Levy. 2008. Expectation-based syntactic com- prehension. Cognition, 106(3):1126-1177.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Speakers optimize information density through syntactic reduction", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Roger", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [ |
| "Florian" |
| ], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Jaeger", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "849--856", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Roger P. Levy and T. Florian Jaeger. 2007. Speak- ers optimize information density through syntactic reduction. In Bernhard Schl\u00f6kopf, John Platt, and Thomas Hoffman, editors, Advances in Neural In- formation Processing Systems, pages 849-856. MIT Press.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Learning a Probabilistic Model of Event Sequences from Internet Weblog Stories", |
| "authors": [ |
| { |
| "first": "Mehdi", |
| "middle": [], |
| "last": "Manshadi", |
| "suffix": "" |
| }, |
| { |
| "first": "Reid", |
| "middle": [], |
| "last": "Swanson", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew S", |
| "middle": [], |
| "last": "Gordon", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of the Twenty-First International FLAIRS Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mehdi Manshadi, Reid Swanson, and Andrew S Gor- don. 2008. Learning a Probabilistic Model of Event Sequences from Internet Weblog Stories. In Pro- ceedings of the Twenty-First International FLAIRS Conference.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Sentence fragments and the notion 'sentence", |
| "authors": [ |
| { |
| "first": "Jerry", |
| "middle": [], |
| "last": "Morgan", |
| "suffix": "" |
| } |
| ], |
| "year": 1973, |
| "venue": "Issues in Linguistics. Papers in Honor of Henry and Ren\u00e9e Kahane", |
| "volume": "", |
| "issue": "", |
| "pages": "719--751", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jerry Morgan. 1973. Sentence fragments and the notion 'sentence'. In Braj B. Kachru, Robert Lees, Yakov Malkiel, Angelina Pietrangeli, and Sol Saporta, editors, Issues in Linguistics. Papers in Honor of Henry and Ren\u00e9e Kahane, pages 719-751. University of Illionois Press, Urbana.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "On the omission of articles and copulae in German newspaper headlines. Linguistic Variation", |
| "authors": [ |
| { |
| "first": "Ingo", |
| "middle": [], |
| "last": "Reich", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "17", |
| "issue": "", |
| "pages": "186--204", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ingo Reich. 2017. On the omission of articles and copulae in German newspaper headlines. Linguis- tic Variation, 17(2):186-204.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Scripts, Plans, Goals, and Understanding: An Enquiry into Human Knowledge Structures", |
| "authors": [ |
| { |
| "first": "Roger", |
| "middle": [], |
| "last": "Schank", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Abelson", |
| "suffix": "" |
| } |
| ], |
| "year": 1977, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Roger Schank and Robert Abelson. 1977. Scripts, Plans, Goals, and Understanding: An Enquiry into Human Knowledge Structures. Erlbaum, Hillsdale.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "The mathematical theory of communication", |
| "authors": [ |
| { |
| "first": "Claude", |
| "middle": [ |
| "E" |
| ], |
| "last": "Shannon", |
| "suffix": "" |
| } |
| ], |
| "year": 1949, |
| "venue": "The Mathematical Theory of Communication", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Claude E. Shannon. 1949. The mathematical theory of communication. In Claude E. Shannon and Warren Weaver, editors, The Mathematical Theory of Com- munication. The University of Illinois Press, Ur- bana.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "SRILM -an extensible language modeling toolkit", |
| "authors": [ |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Stolcke", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proc. Intl. Conf. Spoken Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andreas Stolcke. 2002. SRILM -an extensible lan- guage modeling toolkit. In Proc. Intl. Conf. Spoken Language Processing, Denver, Colorado.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "DeScript: A crowdsourced corpus for the acquisition of highquality script knowledge", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [ |
| "A" |
| ], |
| "last": "Lilian", |
| "suffix": "" |
| }, |
| { |
| "first": "Alessandra", |
| "middle": [], |
| "last": "Wanzare", |
| "suffix": "" |
| }, |
| { |
| "first": "Stefan", |
| "middle": [], |
| "last": "Zarcone", |
| "suffix": "" |
| }, |
| { |
| "first": "Manfred", |
| "middle": [], |
| "last": "Thater", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Pinkal", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of LREC 2016", |
| "volume": "", |
| "issue": "", |
| "pages": "3494--3501", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lilian D. A. Wanzare, Alessandra Zarcone, Stefan Thater, and Manfred Pinkal. 2016. DeScript: A crowdsourced corpus for the acquisition of high- quality script knowledge. In Proceedings of LREC 2016, pages 3494-3501, Portoroz, Slovenia.", |
| "links": null |
| } |
| }, |
| "ref_entries": {} |
| } |
| } |