| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T14:05:51.116515Z" |
| }, |
| "title": "Hierarchical Encoders for Modeling and Interpreting Screenplays", |
| "authors": [ |
| { |
| "first": "Gayatri", |
| "middle": [], |
| "last": "Bhat", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Bloomberg New York", |
| "location": { |
| "region": "NY", |
| "country": "USA" |
| } |
| }, |
| "email": "gbhat7@bloomberg.net" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "While natural language understanding of longform documents remains an open challenge, such documents often contain structural information that can inform the design of models encoding them. Movie scripts are an example of such richly structured text-scripts are segmented into scenes, which decompose into dialogue and descriptive components. In this work, we propose a neural architecture to encode this structure, which performs robustly on two multi-label tag classification tasks without using handcrafted features. We add a layer of insight by augmenting the encoder with an unsupervised 'interpretability' module, which can be used to extract and visualize narrative trajectories. Though this work specifically tackles screenplays, we discuss how the underlying approach can be generalized to a range of structured documents.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "While natural language understanding of longform documents remains an open challenge, such documents often contain structural information that can inform the design of models encoding them. Movie scripts are an example of such richly structured text-scripts are segmented into scenes, which decompose into dialogue and descriptive components. In this work, we propose a neural architecture to encode this structure, which performs robustly on two multi-label tag classification tasks without using handcrafted features. We add a layer of insight by augmenting the encoder with an unsupervised 'interpretability' module, which can be used to extract and visualize narrative trajectories. Though this work specifically tackles screenplays, we discuss how the underlying approach can be generalized to a range of structured documents.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "As natural language understanding of sentences and short documents continues to improve, interest in tackling longer-form documents such as academic papers (Ren et al., 2014; Bhagavatula et al., 2018) , novels (Iyyer et al., 2016) and screenplays (Gorinski and Lapata, 2018) has been growing. Analyses of such documents can take place at multiple levels, e.g. identifying both document-level labels (such as genre) and narrative trajectories (how do levels of humor and romance vary over the course of a romantic comedy?). However, one key challenge for these tasks is the low signal-to-noise ratio in lengthy texts (as indicated by the performance of such models on curated datasets like NarrativeQA (Ko\u010disk\u00fd et al., 2018) ), which makes it difficult to apply end-to-end (E2E) neural network solutions that have recently achieved state-of-the-art on other tasks (Barrault et al., 2019; Williams et al., 2018; Wang et al., 2019 Instead, models either rely on a) a pipeline that provides a battery of syntactic and semantic information from which to craft features (e.g., the BookNLP pipeline (Bamman et al., 2014) for literary text, graph-based features (Gorinski and Lapata, 2015) for movie scripts, or outputs from a discourse parser (Ji and Smith, 2017) for text categorization) and/or b) the linguistic intuitions of the model designer to select features relevant to the task at hand (e.g., rather than ingest the entire text, Bhagavatula et al. (2018) only consider certain sections like the title and abstract of an academic publication). While there is much to recommend these approaches, E2E neural modeling offers several key advantages: it obviates the need for auxiliary feature-generating models, minimizes the risk of error propagation, and offers improved generalization across large-scale corpora. This work explores how the inherent structure of a document class can be leveraged to facilitate an E2E approach. 
We focus on screenplays, investigating whether we can effectively extract key information by first segmenting them into scenes, and further exploiting the structural regularities within each scene.", |
| "cite_spans": [ |
| { |
| "start": 156, |
| "end": 174, |
| "text": "(Ren et al., 2014;", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 175, |
| "end": 200, |
| "text": "Bhagavatula et al., 2018)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 210, |
| "end": 230, |
| "text": "(Iyyer et al., 2016)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 701, |
| "end": 723, |
| "text": "(Ko\u010disk\u00fd et al., 2018)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 863, |
| "end": 886, |
| "text": "(Barrault et al., 2019;", |
| "ref_id": null |
| }, |
| { |
| "start": 887, |
| "end": 909, |
| "text": "Williams et al., 2018;", |
| "ref_id": "BIBREF44" |
| }, |
| { |
| "start": 910, |
| "end": 927, |
| "text": "Wang et al., 2019", |
| "ref_id": "BIBREF42" |
| }, |
| { |
| "start": 1092, |
| "end": 1113, |
| "text": "(Bamman et al., 2014)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 1236, |
| "end": 1256, |
| "text": "(Ji and Smith, 2017)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 1431, |
| "end": 1456, |
| "text": "Bhagavatula et al. (2018)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "With an average of >20k tokens per script in our evaluation corpus, extracting salient aspects is far from trivial. Through a series of carefully controlled experiments, we show that a structureaware approach significantly improves document classification by effectively collating sparsely distributed information. Further, this method produces both document-and scene-level embeddings, which can be used downstream to visualize narrative trajectories of interest (e.g., the prominence of various themes across a script). The overarching strategy of this work is to incorporate structural priors as biases into the neural architecture itself (e.g., Socher et al. (2013) , Strubell et al. (2018) , inter alia), whereby, as Henderson (2020) observe, \"locality in the model structure can reflect locality in the linguistic structure\" to boost accuracy over feature-engineering approaches. The methods we propose can readily generalize to any long-form text with an exploitable internal structure, including novels (chapters), theatrical plays (scenes), chat logs (turn-taking), online games (levels/rounds/gameplay events), and academic texts (sections and subsections).", |
| "cite_spans": [ |
| { |
| "start": 649, |
| "end": 669, |
| "text": "Socher et al. (2013)", |
| "ref_id": "BIBREF39" |
| }, |
| { |
| "start": 672, |
| "end": 694, |
| "text": "Strubell et al. (2018)", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 722, |
| "end": 738, |
| "text": "Henderson (2020)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We begin by detailing how a script can be formally decomposed first into scenes and further into granular elements with distinct discourse functions, in \u00a72. We then propose an encoder based on hierarchical attention (Yang et al., 2016 ) that effectively leverages this structure in \u00a73. In \u00a75.3, the predictive performance of the hierarchical encoder is validated on two multi-label tag prediction tasks, one of which rigorously establishes the utility of modeling structure at multiple granularities (i.e. at the level of line, scene, and script). Notably, while the resulting scene-encoded representation is useful for prediction tasks, it is not amenable to easy interpretation or examination. To shed light on the encoded document representations, in \u00a74, we propose an unsupervised interpretability module that can be attached to an encoder of any complexity. \u00a75.5 outlines our application of this module to the scene encoder, and the resulting visualizations of the screenplay, which illustrate how plot elements vary over the course of the narrative arc. \u00a76 draws connections to related work, before concluding.", |
| "cite_spans": [ |
| { |
| "start": 216, |
| "end": 234, |
| "text": "(Yang et al., 2016", |
| "ref_id": "BIBREF45" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Movie and television scripts (or screenplays) are traditionally segmented into scenes, with a rough rule of thumb being that each scene lasts about a minute on-screen. A scene is not necessarily a distinct narrative unit (which is most often a sequence of several consecutive scenes), but is constituted by a piece of continuous action at a single location. Fig. 1 contains a segment of a scene from the screenplay for the Pulp Fiction, a 1994 American film. These segments tend to follow a standard format. Each scene starts with a scene heading or 'slug line' that briefly describes the scene setting. A sequence of statements follow, and screenwriters typically use formatting to distinguish between dialogue and action statements (Argentini, 1998) . A dialogue identifies the character who utters it either on-or off-screen (the latter is often indicated with '(V.O.)' for voice-over). Parentheticals might be used to include special instructions regarding dialogue delivery. Action statements are all nondialogue constituents of the screenplay \"often used by the screenwriter to describe character actions, camera movement, appearance, and other details\" (Pavel et al., 2015) . In this work, we consider action and dialogue statements, as well as character identities for each dialogue segment, ignoring slug lines and parentheticals.", |
| "cite_spans": [ |
| { |
| "start": 734, |
| "end": 751, |
| "text": "(Argentini, 1998)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 1160, |
| "end": 1180, |
| "text": "(Pavel et al., 2015)", |
| "ref_id": "BIBREF35" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 358, |
| "end": 364, |
| "text": "Fig. 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Script Structure", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The large size of a movie script makes it computationally infeasible for recurrent encoders to ingest these screenplays as single blocks of text. Instead, we propose a hierarchical encoder that mirrors the structure of a screenplay ( \u00a72) -a sequence of scenes, each of which is an interwoven sequence of action and dialogue statements. The encoder is three-tiered, as illustrated in Fig. 2 , and processes the text of a script as follows.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 383, |
| "end": 389, |
| "text": "Fig. 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Hierarchical Scene Encoders", |
| "sec_num": "3" |
| }, |
| { |
| "text": "First, an action-statement encoder transforms the sequence of words in an action statement (represented by their pretrained word embeddings) into an action statement embedding. Next, an action-scene encoder transforms the chronological sequence of action statement embeddings within a scene into an action scene embedding. Analogously, a dialogue-statement encoder and a dialogue-scene encoder generate dialogue statement embeddings and aggregate them into dialogue scene embeddings. To incorporate character information, characters are represented as embeddings (randomly initialized and updated during model training), and an average of embeddings of all characters with at least one dialogue in the scene is computed. 1 Finally, the action, dialogue and averaged character embeddings for a scene are concatenated into a single scene embedding. Scene-level predictions can be obtained by feeding scene embeddings into a subsequent neural module, e.g. a feedforward layer for supervised tagging. Alternatively, a final script encoder can be used to transform the sequence of scene embeddings into a script embedding representing the entire screenplay.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Architecture", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Dialogue line encoder", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Action line encoder", |
| "sec_num": null |
| }, |
| { |
| "text": "w 1 w 2 w 3 \u2026 w n A 1 A 2 A 3 \u2026 A n", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Action line encoder", |
| "sec_num": null |
| }, |
| { |
| "text": "Action scene encoder A key assumption underlying the model is that action and dialogue statements -as instances of written narrative and spoken language respectivelyare distinct categories of text that must be processed separately. We evaluate this assumption in \u00a75.3.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Action line encoder", |
| "sec_num": null |
| }, |
| { |
| "text": "w 1 w 2 w 3 \u2026 w n D 1 D 2 D 3 \u2026 D n", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Action line encoder", |
| "sec_num": null |
| }, |
| { |
| "text": "C 1 C 2 C 3 \u2026 C n", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Action line encoder", |
| "sec_num": null |
| }, |
| { |
| "text": "The proposed model incorporates strong inductive biases regarding the overall structure of input documents. In addition, since the aforementioned encoders \u00a73.1 are underspecified, we evaluate three instantiations of the encoder components:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoders", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "1. Sequential (GRU): A bidirectional GRU (Bahdanau et al., 2015) encodes input sequences (of words, statements or scenes). Given a sequence of input embeddings e 1 , . . . , e T , we obtain GRU outputs c 1 , . . . , c T , and use c T as the recurrent encoder's final output.", |
| "cite_spans": [ |
| { |
| "start": 41, |
| "end": 64, |
| "text": "(Bahdanau et al., 2015)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoders", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Attention (Bahdanau et al., 2015 ) is used to combine c 1 , . . . , c T . This allows more or less informative inputs to be filtered accordingly. We calculate attention weights using a parametrized vector p of the same dimensionality as the GRU outputs (Sukhbaatar et al., 2015; Yang et al., 2016) :", |
| "cite_spans": [ |
| { |
| "start": 10, |
| "end": 32, |
| "text": "(Bahdanau et al., 2015", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 253, |
| "end": 278, |
| "text": "(Sukhbaatar et al., 2015;", |
| "ref_id": "BIBREF41" |
| }, |
| { |
| "start": 279, |
| "end": 297, |
| "text": "Yang et al., 2016)", |
| "ref_id": "BIBREF45" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequential with Attention (GRU + Attn):", |
| "sec_num": "2." |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u03b1 i = p T c i \u03a3 T j=1 p T c j", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Sequential with Attention (GRU + Attn):", |
| "sec_num": "2." |
| }, |
| { |
| "text": "These weights are used to compute the final encoder output:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequential with Attention (GRU + Attn):", |
| "sec_num": "2." |
| }, |
| { |
| "text": "c = \u03a3 T j=1 \u03b1 i c i (2)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequential with Attention (GRU + Attn):", |
| "sec_num": "2." |
| }, |
| { |
| "text": "3. Bag-of-Embeddings with Attention (BoE + Attn): These encoders disregard sequential information to compute an attention-weighted average of the encoder's inputs:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequential with Attention (GRU + Attn):", |
| "sec_num": "2." |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u03b1 i = p T e i \u03a3 T j=1 p T e j (3) c = \u03a3 T j=1 \u03b1 i e i", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "Sequential with Attention (GRU + Attn):", |
| "sec_num": "2." |
| }, |
| { |
| "text": "In contrast, a bag-of-embeddings (BoE) encoder computes a simple average of its inputs. While defining a far more constrained function space than recurrent encoders, BoE and BoE + Attn representations have the advantage of remaining in the input word embedding space. We leverage this property in \u00a74 where we develop an interpretability layer on top of the encoder outputs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequential with Attention (GRU + Attn):", |
| "sec_num": "2." |
| }, |
| { |
| "text": "The final script embedding is passed into a feedforward classifier (FFNN). As both supervised learning tasks in our evaluation are multi-label classification problems, we use a variant of a simple multi-label one-versus-rest loss, where correlations among tags are ignored. The tag sets have high cardinalities and the fractions of positive samples are inconsistent across tags (see Appendix Tables 3 & 4) ; this motivates the use of a reweighted loss function:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 383, |
| "end": 406, |
| "text": "Appendix Tables 3 & 4)", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Loss for Tag Classification", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "L(y, z) = 1 N L \u03a3 N i=1 \u03a3 L j=1 [y ij log \u03c3(z ij ) + \u03bb j (1 \u2212 y ij )(1 \u2212 log \u03c3(z ij ))]", |
| "eq_num": "(5)" |
| } |
| ], |
| "section": "Loss for Tag Classification", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "where N is the number of samples, L is the number of tag labels, y \u2208 {0, 1} is the target label, z is the output of the FFNN, \u03c3 is the sigmoid function, and \u03bb j is the ratio of positive to negative samples (precomputed over the entire training set, since the development set is too small to tune this parameter) for the j th tag label. With this loss function, we account for label imbalance without tuning separate thresholds for each tag on the validation set.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Loss for Tag Classification", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "As the complexity of learning methods used to encode sentences and documents has increased, so has the need to understand the properties of the encoded representations. Probing methods (Linzen et al., 2016; Conneau et al., 2018) gauge the information captured in an embedding by evaluating its performance on downstream classification tasks, either with manually collected annotations (Shi et al., 2016) or self-supervised proxies (Adi et al., 2016) . In our case, it is laborious and expensive to collect such annotations at the scene level (requiring domain experts), and the proxy evaluation tasks proposed in literature do not probe the narrative properties we wish to surface. Instead, we take inspiration from Iyyer et al. (2016) to learn an unsupervised scene descriptor model that can be trained without relying on such annotations. Using a dictionary learning technique (Olshausen and Field, 1997), the model learns to represent each scene embedding as a weighted mixture of various topics estimated over the entire corpus. It thus acts as an 'interpretability layer' that can be applied over the scene encoder. This model is similar in spirit to dynamic topic models (Blei and Lafferty, 2006) , with the added advantage of producing topics that are both more coherent and more interpretable than those generated by LDA (He et al., 2017; Mitcheltree et al., 2018) .", |
| "cite_spans": [ |
| { |
| "start": 185, |
| "end": 206, |
| "text": "(Linzen et al., 2016;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 207, |
| "end": 228, |
| "text": "Conneau et al., 2018)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 385, |
| "end": 403, |
| "text": "(Shi et al., 2016)", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 431, |
| "end": 449, |
| "text": "(Adi et al., 2016)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 716, |
| "end": 735, |
| "text": "Iyyer et al. (2016)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 1177, |
| "end": 1202, |
| "text": "(Blei and Lafferty, 2006)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 1329, |
| "end": 1346, |
| "text": "(He et al., 2017;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 1347, |
| "end": 1372, |
| "text": "Mitcheltree et al., 2018)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Interpreting Scene Embeddings", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The model has three main components: a scene encoder whose outputs we wish to interpret, a set of topics or descriptors that are the 'basis elements' used to describe an interpretable scene, and a predictor that predicts weights over descriptors for a given scene embedding. The scene encoder uses the text of a given scene s t to produce a corresponding scene embedding v t . This encoder can take any form -from an extractor that derives a hand-crafted feature set from the scene text, as in Gorinski and Lapata (2018), to the scene encoder in \u00a73.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scene Descriptor Model", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "To probe the contents of scene embedding v t , we compute a descriptor-based representation w t \u2208 R d in terms of a descriptor matrix R \u2208 R k\u00d7d that stores k topics or descriptors:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scene Descriptor Model", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "o t = softmax(f (v t )) (6) w t = R T o t", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scene Descriptor Model", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "where o t \u2208 R k is the weight (probability) vector over k descriptors and f (v t ) is a predictor (illustrated by the leftmost pipeline in Fig. 3 ) which con-", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 139, |
| "end": 145, |
| "text": "Fig. 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Scene Descriptor Model", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "verts v t into o t . Two variants are f = FFNN(v t ) and f = FFNN([v t ; o t\u22121 ]) (concatenation)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scene Descriptor Model", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "; we use the former in \u00a75.5. Furthermore, we can incorporate additional recurrence into the model by modifying Eq. 6 to add the previous state:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scene Descriptor Model", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "o t =(1 \u2212 \u03b1) \u2022 softmax(FFNN([v t ; o t\u22121 ])) + \u03b1 \u2022 o t\u22121", |
| "eq_num": "(7)" |
| } |
| ], |
| "section": "Scene Descriptor Model", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Descriptors are initialized either randomly (Glorot and Bengio, 2010) or with the centroids of a k-means clustering of the input word embeddings. For the predictor, f is a two-layer FFNN with ReLU activations and a softmax layer that transforms v t (from the scene encoder) into a 100dimensional intermediate state and then into o t .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Scene Descriptor Model", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We wish to minimize the reconstruction error between two scene representations: (1) the descriptorbased embedding w t which depends on the scene embedding v t , and (2) an attention-weighted bagof-words embedding for s t . This encourages the computed descriptor weights to be indicative of the scene's actual content (the portions of its text that indicate attributes of interest such as genre, plot, and mood). We use a BoE+Attn scene encoder ( \u00a73.2) pretrained on the tag classification task (bottom right of Fig. 3 ), which yields a vector u t \u2208 R d for scene s t . The scene descriptor model is then trained using a hinge loss objective (Weston et al., 2011) to minimize the reconstruction error between w t and u t , with an additional orthogonality constraint on R to encourage semantically distinct descriptors:", |
| "cite_spans": [ |
| { |
| "start": 642, |
| "end": 663, |
| "text": "(Weston et al., 2011)", |
| "ref_id": "BIBREF43" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 512, |
| "end": 518, |
| "text": "Fig. 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Reconstruction Task", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "L =\u03a3 n j=1 max(0, 1 \u2212 w T t u t + w T t u j ) + \u03bb RR T \u2212 I 2", |
| "eq_num": "(8)" |
| } |
| ], |
| "section": "Reconstruction Task", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "where u 1 . . . u n are n negative samples selected from other scenes in the same screenplay.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Reconstruction Task", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We use a BoE+Attn scene encoder as a \"target\" u t to force w t (and therefore the rows in R) in the same space as the input word embeddings. Thus, a given descriptor can be semantically interpreted by querying its nearest neighbors in the word embedding space. The predicted descriptor weights for a scene s t are obtained by running a forward pass through the model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Reconstruction Task", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We evaluate the proposed script encoder and its variants through two supervised multilabel tag prediction tasks, and a qualitative analysis via the unsupervised extraction of descriptor trajectories.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We base our evaluation on the ScriptBase-J corpus released by Gorinski and Lapata (2018) to directly compare our approach with the multilabel encoder proposed in Gorinski and Lapata (2018) and to provide an open-source evaluation standard. 2 In this corpus, each movie is associated with a set of expert-curated tags that range across 6 tag attributes: mood, plot, genre, attitude, place, and flag; in addition, we also evaluate on an internal dataset of labels assigned to the same movies by in-house domain experts, across 3 tag attributes: genre, plot, and mood. The two taxonomies are distinct. (See Appendix Table 3 ).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 613, |
| "end": 620, |
| "text": "Table 3", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "As in Pavel et al. (2015) , we leverage the standard screenplay format (Argentini, 1998) to extract structured representations of scripts (formatting cues included capitalization and tab-spacing; see Fig. 1 and Table 1 for an example). Filtering erroneously processed scripts removes 6% of the corpus, resulting in a total of 857 scripts. We hold out 20% (172) scripts for evaluation and use the 2 https://github.com/EdinburghNLP/scriptbase rest for training. The average number of tokens per script is around 23k; additional statistics are shown in Appendix Table 1 .", |
| "cite_spans": [ |
| { |
| "start": 6, |
| "end": 25, |
| "text": "Pavel et al. (2015)", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 71, |
| "end": 88, |
| "text": "(Argentini, 1998)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 200, |
| "end": 206, |
| "text": "Fig. 1", |
| "ref_id": "FIGREF0" |
| }, |
| { |
| "start": 211, |
| "end": 218, |
| "text": "Table 1", |
| "ref_id": "TABREF2" |
| }, |
| { |
| "start": 559, |
| "end": 566, |
| "text": "Table 1", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Script Preprocessing", |
| "sec_num": null |
| }, |
| { |
| "text": "To keep within GPU memory limits, we split extremely long scenes to retain no more than 60 action and 60 dialogue lines per scene. The vocabulary is composed of words with at least 5 occurrences across the script corpus. The number of scripts per tag value ranges from high (e.g. for some Genre tags) to low (for most Plot and Mood tags) in both datasets (see Appendix Table 4 ), which along with high tag cardinality for each attribute motivates the use of the reweighted loss in Eq. 5.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 360, |
| "end": 377, |
| "text": "Appendix Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Script Preprocessing", |
| "sec_num": null |
| }, |
| { |
| "text": "All inputs to the hierarchical scene encoder are 100-dimensional GloVe embeddings (Pennington et al., 2014) . 3 Our sequential models are bi-GRUs with a single 50-dimensional hidden layer in each direction, resulting in 100-dimensional outputs. The attention parameter p is 100-dimensional; BoE models naturally output 100-dimensional representations, and character embeddings are 10dimensional. The script encoder's output is passed through a linear layer with sigmoid activation and binarized by thresholding at 0.5.", |
| "cite_spans": [ |
| { |
| "start": 82, |
| "end": 107, |
| "text": "(Pennington et al., 2014)", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 110, |
| "end": 111, |
| "text": "3", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "One simplification we use is to utilize the same encoder type for all encoders described in \u00a73.1. However, particular encoder types might suit different tiers of the architecture: e.g. scene embeddings could be aggregated in a permutationinvariant manner, since narratives are interwoven and scenes may not be truly sequential.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "We implement the script encoder on top of AllenNLP (Gardner et al., 2017) and PyTorch (Paszke et al., 2019) , and all experiments are conducted on an AWS p2.8xlarge machine. We use the Adam optimizer with an initial learning rate of 0.005, clip gradients at a maximum norm of 5, and use no dropout. The model is trained for up to 20 epochs to maximize average precision score, with early stopping if the validation metric does not improve for 5 consecutive epochs.", |
| "cite_spans": [ |
| { |
| "start": 51, |
| "end": 73, |
| "text": "(Gardner et al., 2017)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 86, |
| "end": 107, |
| "text": "(Paszke et al., 2019)", |
| "ref_id": "BIBREF34" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "ScriptBase-J also comes with loglines, or short, 1-2 sentence human-crafted summaries of the movie's plot and mood (see Appendix Table 2 ). A model trained on these summaries can be expected to provide a reasonable baseline for tag prediction, since logline curators are likely to highlight information relevant to this task. The Loglines model is a bi-GRU with inputs of size 100 (GloVe embeddings) and hidden units of size 50 in each direction, whose output feeds into a linear classifier. 4", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 129, |
| "end": 136, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Tag Prediction Experiments", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Genre Plot Mood Loglines 49.9 (0.8) 12.7 (0.9) 17.5 (0.2) Comparing encoder variations: BoE 49.0 (1.1) 8.3 (0.6) 12.9 (0.7) BoE + Attn 51.9 (2.3) 11.3 (0.4) 16.3 (0.6) GRU 57.9 (1.9) 13.0 (1.3) 19.1 (1.0) GRU + Attn 60.5 (2.0) 15.2 (0.4) 22.9 (1.4) Variants on GRU + Attn for action & dialog: + Chars 62.5 (0.7) 11.7 (0.3) 18.2 (0.3) -Action 60.5 (2.9) 13.5 (1.4) 20.0 (1.2) -Dialogue 60.5 (0.6) 13.4 (1.7) 19.1 (1.4) 2-tier 61.3 (2.3) 13.7 (1.7) 20.6 (1.2) HAN 61.5 (0.6) 14.2 (1.7) 20.7 (1.4) Table 2 : Investigation of the effects of different architectural (BoE +/-Attn, GRU +/-Attn) and structural choices on a tag prediction task, using an internally tagged dataset: F-1 scores with sample standard deviation in parentheses. Across the 3 tag attributes we find that modeling sentential and scene-level structure helps, and attention helps extract representations more salient to the task at hand. Table 2 contains results for the tag prediction task on our internally-tagged dataset. First, a set of models trained using action and dialogue inputs are used to evaluate the architectural choices in \u00a73.1. We find that modeling recurrence at sentential and scene levels and selecting relevant words/scenes with attention are prominent factors in the robust improvement over the Loglines baseline (see the first five rows in Table 2) .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 495, |
| "end": 502, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 903, |
| "end": 910, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 1328, |
| "end": 1336, |
| "text": "Table 2)", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": null |
| }, |
| { |
| "text": "Next, we assess the effect that various structural elements of a screenplay have on classification performance. Notably, the difficulty of the prediction task is directly related to the number of labels per tag attribute: higher-cardinality tag attributes with correlated tag values (like plot and mood) are far more difficult to predict than lower-cardinality tags with more discriminable values (like genre). We find that adding character information to the bestperforming GRU + Attn model (+Char) improves prediction of genre, while using both dialogue and action statements improves performance on plot and mood when compared to using only one or the other. We also evaluate (1) a 2-tier variant of the GRU+Attn model without action/dialoguestatement encoders (i.e., all action statements are concatenated into a single sequence of words and passed into the action-scene encoder, and similarly with dialogue) and (2) a variant similar to Yang et al. (2016) (HAN) that does not distinguish between action and dialogue (i.e., all statements in a scene are encoded using a single statement encoder and statement embeddings are passed to a scene encoder, the output of which is passed into the script encoder). Both models perform slightly better than GRU+Attn on genre, but worse on plot and mood, indicating that incorporating hierarchy and distinguishing between dialogue and action statements helps on the more difficult prediction tasks. For the results in Table 3 , we compared the GRU+Attn configuration in Table 2 (HSE) with an implementation of Gorinski and Lapata (2018) (G&L) that was run on the previous train-test split. G&L contains a number of handcrafted lexical, graph-based, and interactive features that were designed for optimal performance on screenplay analysis. In contrast, HSE directly encodes standard screenplay structure into a neural network architecture, and is an alternative, arguably more lightweight way of building a domain-specific textual representation. Our results are comparable, with the exception of 'place', which can often be identified deterministically from scene headings. Tables 2 and 3 check for an exact match between predicted and true tag values to report standard multi-label F-1 scores (one-vs-rest classification evaluation, micro-averaged over tag attributes). However, the characteristics of our tag taxonomies suggest that this measure may not be ideal, since human-crafted tag sets include dozens of highly correlated, overlapping values, and the dataset includes instances of missing tags. A standard scoring procedure may underestimate model performance when, e.g., a prediction of 'Crime' is equally penalized for a target labels of 'Heist' and 'Romance' (see Appendix Table 5 ).", |
| "cite_spans": [ |
| { |
| "start": 942, |
| "end": 960, |
| "text": "Yang et al. (2016)", |
| "ref_id": "BIBREF45" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 961, |
| "end": 966, |
| "text": "(HAN)", |
| "ref_id": null |
| }, |
| { |
| "start": 1462, |
| "end": 1469, |
| "text": "Table 3", |
| "ref_id": "TABREF5" |
| }, |
| { |
| "start": 1514, |
| "end": 1527, |
| "text": "Table 2 (HSE)", |
| "ref_id": null |
| }, |
| { |
| "start": 2120, |
| "end": 2134, |
| "text": "Tables 2 and 3", |
| "ref_id": "TABREF5" |
| }, |
| { |
| "start": 2731, |
| "end": 2738, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": null |
| }, |
| { |
| "text": "We use a similarity-based scoring procedure (see Maynard et al. (2006) for related approaches) to assess the impact of such effects. In particular, we calculate cosine similarities between tag embeddings trained on a similar task (see Appendix for details) and evaluate a prediction based on the percentile of its similarity to the actual label. Such a measure takes into account the latent relationships among tags via similarity thresholding, wherein a prediction is counted as correct if it is sufficiently similar to the target. The percentile cutoff can be varied to estimate model performance as a function of the threshold percentile.", |
| "cite_spans": [ |
| { |
| "start": 49, |
| "end": 70, |
| "text": "Maynard et al. (2006)", |
| "ref_id": "BIBREF30" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results in", |
| "sec_num": null |
| }, |
| { |
| "text": "In Fig. 4 we re-evaluate the GRU + Attn model outputs (row 5 in Table 2 ) with this evaluation metric to examine how our results might vary if we adopted a similarity-based scoring procedure. When the similarity percentile cutoff equals 100, the result is identical to the standard F-1 score. Even decreasing the cutoff to the 90 th percentile shows striking improvements for high-cardinality attributes (180% for mood and 250% for plot). Notably, using a similarity-based scoring procedure for complex tag taxonomies may yield results that more accurately reflect human perception of the model's performance (Maynard et al., 2006) .", |
| "cite_spans": [ |
| { |
| "start": 609, |
| "end": 631, |
| "text": "(Maynard et al., 2006)", |
| "ref_id": "BIBREF30" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 3, |
| "end": 9, |
| "text": "Fig. 4", |
| "ref_id": "FIGREF3" |
| }, |
| { |
| "start": 64, |
| "end": 71, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results in", |
| "sec_num": null |
| }, |
| { |
| "text": "To extract narrative trajectories with the scene descriptor model, we analyze the scene encoder from the GRU+Attn model, which performs best on the Plot and Mood tag attributes and does reasonably well on Genre. Similarly to Iyyer et al. (2016) , we limit the input vocabulary for the BoE+Attn encoders that yield target vectors u t to words occurring in at least 50 movies (7.3% of the training set), while also filtering the 500 most frequent words in the corpus. We set the number of descriptors k to 25 to allow for a wide range of topics while keeping manual examination feasible.", |
| "cite_spans": [ |
| { |
| "start": 225, |
| "end": 244, |
| "text": "Iyyer et al. (2016)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Qualitative Scene-level Analysis", |
| "sec_num": "5.5" |
| }, |
| { |
| "text": "Further modeling choices are evaluated using the semantic coherence metric (Mimno et al., 2011) , which assesses the quality of word clusters induced by topic modeling algorithms. These choices include: the presence of recurrence in the predictor (i.e. toggling between Eqns. 6 and 7, with \u03b1 = 0.5) and the value of hyperparameter \u03bb. While the k-means initialized descriptors score slightly higher on semantic coherence, they remain close to the initial centroids and do not reflect the corpus as well as the randomly initialized version, which is the initialization we eventually used. We also find that incorporating recurrence and \u03bb = 10 (tuned using simple grid search) results in the highest coherence.", |
| "cite_spans": [ |
| { |
| "start": 75, |
| "end": 95, |
| "text": "(Mimno et al., 2011)", |
| "ref_id": "BIBREF31" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Qualitative Scene-level Analysis", |
| "sec_num": "5.5" |
| }, |
| { |
| "text": "The outputs of the scene descriptor model are shown in Table 4 and Figure 5 . Table 4 presents five example descriptors, each identified by the representative words closest to them in the word embedding space (topic names are manually annotated). Figure 5 presents the narrative trajectories of a subset of descriptors for three screenplays: Pretty Woman, Pulp Fiction, and Pearl Harbor, using a streamgraph (Byron and Wattenberg, 2008) . The descriptor weight o t (Eq. 6) as a function of scene number/order is rescaled and smoothed, with the width of a color band indicating the weight value. A critical event for each screenplay is indicated by a letter on each trajectory. A qualitative analysis of such events indicates general alignment between scripts and their topic trajectories, and the potential applicability of this method to identifying significant moments in long-form documents. Table 4 .", |
| "cite_spans": [ |
| { |
| "start": 408, |
| "end": 436, |
| "text": "(Byron and Wattenberg, 2008)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 55, |
| "end": 62, |
| "text": "Table 4", |
| "ref_id": null |
| }, |
| { |
| "start": 67, |
| "end": 75, |
| "text": "Figure 5", |
| "ref_id": "FIGREF5" |
| }, |
| { |
| "start": 78, |
| "end": 85, |
| "text": "Table 4", |
| "ref_id": null |
| }, |
| { |
| "start": 247, |
| "end": 255, |
| "text": "Figure 5", |
| "ref_id": "FIGREF5" |
| }, |
| { |
| "start": 895, |
| "end": 902, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Qualitative Scene-level Analysis", |
| "sec_num": "5.5" |
| }, |
| { |
| "text": "Computational narrative analysis of large texts has been explored in a range of contexts (Mani, 2012) over the past few decades (Lehnert, 1981) . Recent work has analyzed narrative from plot (Chambers and Jurafsky, 2008; Goyal et al., 2010) and character (Elsner, 2012; Bamman et al., 2014) perspectives. While movie narratives have received attention (Bamman et al., 2013; Chaturvedi et al., 2018; Kar et al., 2018) , the computational analysis of entire screenplays is not as common.", |
| "cite_spans": [ |
| { |
| "start": 89, |
| "end": 101, |
| "text": "(Mani, 2012)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 128, |
| "end": 143, |
| "text": "(Lehnert, 1981)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 191, |
| "end": 220, |
| "text": "(Chambers and Jurafsky, 2008;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 221, |
| "end": 240, |
| "text": "Goyal et al., 2010)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 255, |
| "end": 269, |
| "text": "(Elsner, 2012;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 270, |
| "end": 290, |
| "text": "Bamman et al., 2014)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 352, |
| "end": 373, |
| "text": "(Bamman et al., 2013;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 374, |
| "end": 398, |
| "text": "Chaturvedi et al., 2018;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 399, |
| "end": 416, |
| "text": "Kar et al., 2018)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Notably, Gorinski and Lapata (2015) introduced a summarization method for scripts, extracting graph-based features that summarize the key scene sequences. Gorinski and Lapata (2018) built on top of this work, crafting additional features for a specially-designed multilabel encoder, while also emphasizing the difficulty of the tag prediction task. Our work suggests an orthogonal approach using automatically learned scene representations instead of feature-engineered inputs. We also consider the possibility that at least some of the task difficulty owes not to the length or richness of the text, but rather to the complexity of the tag taxonomy. The pattern of results we obtain from a similarity-based scoring measure offers a brighter picture of model performance, and suggests that the standard multilabel F1 measure may not be appropriate for such complex tag sets (Maynard et al., 2006) .", |
| "cite_spans": [ |
| { |
| "start": 874, |
| "end": 896, |
| "text": "(Maynard et al., 2006)", |
| "ref_id": "BIBREF30" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Nevertheless, dealing with long-form text remains a significant challenge. One possible solution is to infer richer representations of latent structure using a structured attention mechanism (Liu and Lapata, 2018) , which might highlight key dependencies between scenes in a script. Another method could be to define auxiliary tasks as in Jiang and Bansal (2018) to encourage better selec-tion. Lastly, sparse versions of the softmax function (Martins and Astudillo, 2016) could be used to address the sparse distribution of salient information across a screenplay.", |
| "cite_spans": [ |
| { |
| "start": 191, |
| "end": 213, |
| "text": "(Liu and Lapata, 2018)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 339, |
| "end": 362, |
| "text": "Jiang and Bansal (2018)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 443, |
| "end": 472, |
| "text": "(Martins and Astudillo, 2016)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "In this work, we propose and evaluate various neural network architectures for learning fixeddimensional representations of full-length film scripts. We hypothesize that a network design mimicking the documents' internal structure will boost performance. Experiments on two tag prediction tasks support this hypothesis, confirming the benefits of using hierarchical attention-based models and of incorporating distinctions between various scene components directly into the model. In order to explore the information contained within scenelevel embeddings, we present an unsupervised technique for bootstrapping scene \"descriptors\" and visualizing their trajectories over the course of the screenplay. For future work, we plan to investigate richer ways of representing character identities, which could allow character embeddings to be compared across movies and linked to character archetypes. A persona-based characterization of the screenplay would provide a complementary view to the current plot-based analysis.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "Scripts and screenplays are an underutilized and underanalyzed data source in modern NLP -indeed, most work on narratology in NLP concentrates on short stories and book/movie summaries. This paper shows that capitalizing on their rich internal structure largely obviates the need for feature-engineering, or other more complicated architectures, a lesson that may prove instructive in other areas of discourse processing. Our hope is that these results encourage more people to work on this fascinating domain.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "In this section, we present additional statistics on the evaluation sets used in this work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.1 Additional Dataset Statistics", |
| "sec_num": null |
| }, |
| { |
| "text": "Min 10th % 90th % Max 4025 16,240 29,376 52,059 Table 5 : Statistics on the number of tokens per script in the Scriptbase-J corpus. We use the same script corpus with two different tag sets -the Jinni tags provided with ScriptBase and a tag set designed by internal annotators. Drugs/Alcohol, Profanity, Violent Content Logline \"The lives of two mob hit men, a boxer, a gangster's wife, and a pair of diner bandits intertwine in four tales of violence and redemption.\" Table 8 : Statistics for the three tag attributes applied in our internally-tagged dataset: average number of tags per script, and the minimum/maximum number of movies associated with any single value.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 48, |
| "end": 55, |
| "text": "Table 5", |
| "ref_id": null |
| }, |
| { |
| "start": 469, |
| "end": 476, |
| "text": "Table 8", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "A.1 Additional Dataset Statistics", |
| "sec_num": null |
| }, |
| { |
| "text": "To estimate tag-tag similarity percentiles, we calculate the distance between tag embeddings learned via an auxiliary model trained on a related supervised learning task. In our case, the related task is to predict the audience segment of a movie, given a tag set. The general approach is easily replicable via any model that projects tags into a welldefined similarity space (e.g., knowledge-graph embeddings (?) or tag-based autoencoders). Given a tag embedding space, the similarity percentile of a pair of tag values is estimated as follows. For a given tag attribute, the pairwise cosine distance between tag embeddings is computed for all tag-tag value pairs. For a given pair, its similarity percentile is then calculated with reference to the overall distribution for that attribute.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.2 Tag Similarity Scoring", |
| "sec_num": null |
| }, |
| { |
| "text": "Similarity thresholding simplifies the tag prediction task by significantly reducing the perplexity of the tag set, while only marginally reducing its cardinality. Cardinality can be estimated via permutations. If n is the cardinality of the tag set, the number of permutations p of different tag pairs (k = 2) is:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.2 Tag Similarity Scoring", |
| "sec_num": null |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "p(n, k) = n! (n \u2212 k)!", |
| "eq_num": "(9)" |
| } |
| ], |
| "section": "A.2 Tag Similarity Scoring", |
| "sec_num": null |
| }, |
| { |
| "text": "which simplifies to n^2 \u2212 n \u2212 p = 0. Likewise, the entropy of a list of n distinct tag values of varying probabilities is given by:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.2 Tag Similarity Scoring", |
| "sec_num": null |
| }, |
| { |
| "text": "H(X) = H(tag_1, ..., tag_n) = \u2212\\sum_{i=1}^{n} tag_i \\log_2 tag_i (10)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.2 Tag Similarity Scoring", |
| "sec_num": null |
| }, |
| { |
| "text": "The perplexity over tags is then simply 2 H(X) .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.2 Tag Similarity Scoring", |
| "sec_num": null |
| }, |
| { |
| "text": "Perplexity Cardinality Genre 42% 16% Mood 77% 16% Plot 79% 16% Table 10 : The percent decrease in perplexity and cardinality, respectively, as the similarity threshold decreases from 100th percentile similarity (baseline) to 70th percentile.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 63, |
| "end": 71, |
| "text": "Table 10", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Tag", |
| "sec_num": null |
| }, |
| { |
| "text": "As the similarity threshold decreases, the number of tags treated as equivalent correspondingly increases. Mapping these \"equivalents\" to a shared label in our list of tag values allows us to calculate updated values for tag (1) perplexity and (2) cardinality. As illustrated by Table 10 , rather than leading to large reductions in the overall cardinality of the tag set, similarity thresholding mainly serves to decrease perplexity by eliminating redundant/highly similar alternatives. Thus, thresholding at once significantly decreases the complexity of the prediction task, while yielding a potentially more representative picture of model performance.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 279, |
| "end": 287, |
| "text": "Table 10", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Tag", |
| "sec_num": null |
| }, |
| { |
| "text": "We only take into account characters at the scene level, i.e. we do not associate characters with each dialogue statement, leaving this addition to future work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Using richer contextual word representations will improve performance, but is orthogonal to the purpose of this work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We tried both with and without attention and found the variant without attention to give slightly better results.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "The authors would like to thank the anonymous reviewers for their valuable comments and feedback, and Ashish Rastogi for his support and guidance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Fine-grained analysis of sentence embeddings using auxiliary prediction tasks", |
| "authors": [ |
| { |
| "first": "Yossi", |
| "middle": [], |
| "last": "Adi", |
| "suffix": "" |
| }, |
| { |
| "first": "Einat", |
| "middle": [], |
| "last": "Kermany", |
| "suffix": "" |
| }, |
| { |
| "first": "Yonatan", |
| "middle": [], |
| "last": "Belinkov", |
| "suffix": "" |
| }, |
| { |
| "first": "Ofer", |
| "middle": [], |
| "last": "Lavi", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Goldberg", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of ICLR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yossi Adi, Einat Kermany, Yonatan Belinkov, Ofer Lavi, and Yoav Goldberg. 2016. Fine-grained anal- ysis of sentence embeddings using auxiliary predic- tion tasks. Proceedings of ICLR.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Elements of Style for Screenwriters", |
| "authors": [ |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Argentini", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Paul Argentini. 1998. Elements of Style for Screenwrit- ers. Lone Eagle Publishing.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Neural machine translation by jointly learning to align and translate", |
| "authors": [ |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of ICLR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Ben- gio. 2015. Neural machine translation by jointly learning to align and translate. In Proceedings of ICLR.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Learning latent personas of film characters", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Bamman", |
| "suffix": "" |
| }, |
| { |
| "first": "O'", |
| "middle": [], |
| "last": "Brendan", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Connor", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Bamman, Brendan O'Connor, and Noah A. Smith. 2013. Learning latent personas of film char- acters. In Proceedings of ACL.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "A bayesian mixed effects model of literary character", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Bamman", |
| "suffix": "" |
| }, |
| { |
| "first": "Ted", |
| "middle": [], |
| "last": "Underwood", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Bamman, Ted Underwood, and Noah A. Smith. 2014. A bayesian mixed effects model of literary character. In Proceedings of ACL.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Santanu Pal, Matt Post, and Marcos Zampieri. 2019. Findings of the 2019 conference on machine translation (wmt19)", |
| "authors": [ |
| { |
| "first": "Lo\u00efc", |
| "middle": [], |
| "last": "Barrault", |
| "suffix": "" |
| }, |
| { |
| "first": "Ond\u0159ej", |
| "middle": [], |
| "last": "Bojar", |
| "suffix": "" |
| }, |
| { |
| "first": "Marta", |
| "middle": [ |
| "R" |
| ], |
| "last": "Costa-Juss\u00e0", |
| "suffix": "" |
| }, |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Federmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Fishel", |
| "suffix": "" |
| }, |
| { |
| "first": "Yvette", |
| "middle": [], |
| "last": "Graham", |
| "suffix": "" |
| }, |
| { |
| "first": "Barry", |
| "middle": [], |
| "last": "Haddow", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthias", |
| "middle": [], |
| "last": "Huck", |
| "suffix": "" |
| }, |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Shervin", |
| "middle": [], |
| "last": "Malmasi", |
| "suffix": "" |
| }, |
| { |
| "first": "Christof", |
| "middle": [], |
| "last": "Monz", |
| "suffix": "" |
| }, |
| { |
| "first": "Mathias", |
| "middle": [], |
| "last": "M\u00fcller", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "Proceedings of WMT", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lo\u00efc Barrault, Ond\u0159ej Bojar, Marta R. Costa-juss\u00e0, Christian Federmann, Mark Fishel, Yvette Gra- ham, Barry Haddow, Matthias Huck, Philipp Koehn, Shervin Malmasi, Christof Monz, Mathias M\u00fcller, Santanu Pal, Matt Post, and Marcos Zampieri. 2019. Findings of the 2019 conference on machine transla- tion (wmt19). In Proceedings of WMT.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Content-based citation recommendation", |
| "authors": [ |
| { |
| "first": "Chandra", |
| "middle": [], |
| "last": "Bhagavatula", |
| "suffix": "" |
| }, |
| { |
| "first": "Sergey", |
| "middle": [], |
| "last": "Feldman", |
| "suffix": "" |
| }, |
| { |
| "first": "Russell", |
| "middle": [], |
| "last": "Power", |
| "suffix": "" |
| }, |
| { |
| "first": "Waleed", |
| "middle": [], |
| "last": "Ammar", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chandra Bhagavatula, Sergey Feldman, Russell Power, and Waleed Ammar. 2018. Content-based citation recommendation. In Proceedings NAACL.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Dynamic topic models", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "David", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [ |
| "D" |
| ], |
| "last": "Blei", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Lafferty", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of ICML", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David M. Blei and John D. Lafferty. 2006. Dynamic topic models. In Proceedings of ICML.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Stacked graphsgeometry aesthetics", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Byron", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Wattenberg", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "IEEE Transactions on Visualization and Computer Graphics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "L Byron and M. Wattenberg. 2008. Stacked graphs - geometry aesthetics. IEEE Transactions on Visual- ization and Computer Graphics.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Unsupervised learning of narrative event chains", |
| "authors": [ |
| { |
| "first": "Nathanael", |
| "middle": [], |
| "last": "Chambers", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nathanael Chambers and Dan Jurafsky. 2008. Unsu- pervised learning of narrative event chains. In Pro- ceedings of ACL.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Where have I heard this story before? identifying narrative similarity in movie remakes", |
| "authors": [ |
| { |
| "first": "Snigdha", |
| "middle": [], |
| "last": "Chaturvedi", |
| "suffix": "" |
| }, |
| { |
| "first": "Shashank", |
| "middle": [], |
| "last": "Srivastava", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Roth", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Snigdha Chaturvedi, Shashank Srivastava, and Dan Roth. 2018. Where have I heard this story before? identifying narrative similarity in movie remakes. In Proceedings of NAACL.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "What you can cram into a single vector: Probing sentence embeddings for linguistic properties", |
| "authors": [ |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "German", |
| "middle": [], |
| "last": "Kruszewski", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Lample", |
| "suffix": "" |
| }, |
| { |
| "first": "Lo\u00efc", |
| "middle": [], |
| "last": "Barrault", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Baroni", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexis Conneau, German Kruszewski, Guillaume Lam- ple, Lo\u00efc Barrault, and Marco Baroni. 2018. What you can cram into a single vector: Probing sentence embeddings for linguistic properties. Proceedings of ACL.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Character-based kernels for novelistic plot structure", |
| "authors": [ |
| { |
| "first": "Micha", |
| "middle": [ |
| "Elsner" |
| ], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of EACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Micha Elsner. 2012. Character-based kernels for novel- istic plot structure. In Proceedings of EACL.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Allennlp: A deep semantic natural language processing platform", |
| "authors": [ |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Gardner", |
| "suffix": "" |
| }, |
| { |
| "first": "Joel", |
| "middle": [], |
| "last": "Grus", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Neumann", |
| "suffix": "" |
| }, |
| { |
| "first": "Oyvind", |
| "middle": [], |
| "last": "Tafjord", |
| "suffix": "" |
| }, |
| { |
| "first": "Pradeep", |
| "middle": [], |
| "last": "Dasigi", |
| "suffix": "" |
| }, |
| { |
| "first": "Nelson", |
| "middle": [ |
| "F" |
| ], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Peters", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Schmitz", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [ |
| "S" |
| ], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matt Gardner, Joel Grus, Mark Neumann, Oyvind Tafjord, Pradeep Dasigi, Nelson F. Liu, Matthew Peters, Michael Schmitz, and Luke S. Zettlemoyer. 2017. Allennlp: A deep semantic natural language processing platform.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Understanding the difficulty of training deep feedforward neural networks", |
| "authors": [ |
| { |
| "first": "Xavier", |
| "middle": [], |
| "last": "Glorot", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of AIStats", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xavier Glorot and Yoshua Bengio. 2010. Understand- ing the difficulty of training deep feedforward neural networks. In Proceedings of AIStats.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Movie script summarization as graph-based scene extraction", |
| "authors": [ |
| { |
| "first": "Philip", |
| "middle": [], |
| "last": "John Gorinski", |
| "suffix": "" |
| }, |
| { |
| "first": "Mirella", |
| "middle": [], |
| "last": "Lapata", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philip John Gorinski and Mirella Lapata. 2015. Movie script summarization as graph-based scene extrac- tion. In Proceedings of NAACL.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "What's this movie about? a joint neural network architecture for movie content analysis", |
| "authors": [ |
| { |
| "first": "Philip", |
| "middle": [], |
| "last": "John Gorinski", |
| "suffix": "" |
| }, |
| { |
| "first": "Mirella", |
| "middle": [], |
| "last": "Lapata", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philip John Gorinski and Mirella Lapata. 2018. What's this movie about? a joint neural network architec- ture for movie content analysis. In Proceedings of NAACL.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Automatically producing plot unit representations for narrative text", |
| "authors": [ |
| { |
| "first": "Amit", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Ellen", |
| "middle": [], |
| "last": "Riloff", |
| "suffix": "" |
| }, |
| { |
| "first": "Hal", |
| "middle": [], |
| "last": "Daum\u00e9", |
| "suffix": "" |
| }, |
| { |
| "first": "Iii", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Amit Goyal, Ellen Riloff, and Hal Daum\u00e9, III. 2010. Automatically producing plot unit representations for narrative text. In Proceedings of EMNLP.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "An unsupervised neural attention model for aspect extraction", |
| "authors": [ |
| { |
| "first": "Ruidan", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Hwee Tou", |
| "middle": [], |
| "last": "Wee Sun Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Dahlmeier", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ruidan He, Wee Sun Lee, Hwee Tou Ng, and Daniel Dahlmeier. 2017. An unsupervised neural attention model for aspect extraction. In Proceedings of ACL.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "The unstoppable rise of computational linguistics in deep learning", |
| "authors": [ |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Henderson", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "James Henderson. 2020. The unstoppable rise of com- putational linguistics in deep learning. In Proceed- ings of ACL.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Feuding families and former Friends: Unsupervised learning for dynamic fictional relationships", |
| "authors": [ |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Iyyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Anupam", |
| "middle": [], |
| "last": "Guha", |
| "suffix": "" |
| }, |
| { |
| "first": "Snigdha", |
| "middle": [], |
| "last": "Chaturvedi", |
| "suffix": "" |
| }, |
| { |
| "first": "Jordan", |
| "middle": [], |
| "last": "Boyd-Graber", |
| "suffix": "" |
| }, |
| { |
| "first": "Hal", |
| "middle": [], |
| "last": "Daum\u00e9", |
| "suffix": "" |
| }, |
| { |
| "first": "Iii", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mohit Iyyer, Anupam Guha, Snigdha Chaturvedi, Jor- dan Boyd-Graber, and Hal Daum\u00e9 III. 2016. Feud- ing families and former Friends: Unsupervised learning for dynamic fictional relationships. In Pro- ceedings of NAACL.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Neural discourse structure for text categorization", |
| "authors": [ |
| { |
| "first": "Yangfeng", |
| "middle": [], |
| "last": "Ji", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yangfeng Ji and Noah A. Smith. 2017. Neural dis- course structure for text categorization. In Proceed- ings of ACL.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Closed-book training to improve summarization encoder memory", |
| "authors": [ |
| { |
| "first": "Yichen", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Bansal", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yichen Jiang and Mohit Bansal. 2018. Closed-book training to improve summarization encoder memory. In Proceedings of EMNLP.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Folksonomication: Predicting tags for movies from plot synopses using emotion flow encoded neural network", |
| "authors": [ |
| { |
| "first": "Sudipta", |
| "middle": [], |
| "last": "Kar", |
| "suffix": "" |
| }, |
| { |
| "first": "Suraj", |
| "middle": [], |
| "last": "Maharjan", |
| "suffix": "" |
| }, |
| { |
| "first": "Thamar", |
| "middle": [], |
| "last": "Solorio", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceddings of COLING", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sudipta Kar, Suraj Maharjan, and Thamar Solorio. 2018. Folksonomication: Predicting tags for movies from plot synopses using emotion flow encoded neu- ral network. In Proceddings of COLING.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "The NarrativeQA reading comprehension challenge", |
| "authors": [ |
| { |
| "first": "Tom\u00e1\u0161", |
| "middle": [], |
| "last": "Ko\u010disk\u00fd", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "Schwarz", |
| "suffix": "" |
| }, |
| { |
| "first": "Phil", |
| "middle": [], |
| "last": "Blunsom", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Karl", |
| "middle": [ |
| "Moritz" |
| ], |
| "last": "Hermann", |
| "suffix": "" |
| }, |
| { |
| "first": "G\u00e1bor", |
| "middle": [], |
| "last": "Melis", |
| "suffix": "" |
| }, |
| { |
| "first": "Edward", |
| "middle": [], |
| "last": "Grefenstette", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Transactions of the ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tom\u00e1\u0161 Ko\u010disk\u00fd, Jonathan Schwarz, Phil Blunsom, Chris Dyer, Karl Moritz Hermann, G\u00e1bor Melis, and Edward Grefenstette. 2018. The NarrativeQA read- ing comprehension challenge. Transactions of the ACL.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Plot units and narrative summarization", |
| "authors": [ |
| { |
| "first": "Wendy", |
| "middle": [ |
| "G" |
| ], |
| "last": "Lehnert", |
| "suffix": "" |
| } |
| ], |
| "year": 1981, |
| "venue": "Cognitive Science", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wendy G. Lehnert. 1981. Plot units and narrative sum- marization. Cognitive Science.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Assessing the ability of LSTMs to learn syntax-sensitive dependencies", |
| "authors": [ |
| { |
| "first": "Tal", |
| "middle": [], |
| "last": "Linzen", |
| "suffix": "" |
| }, |
| { |
| "first": "Emmanuel", |
| "middle": [], |
| "last": "Dupoux", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Goldberg", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Transactions of the ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tal Linzen, Emmanuel Dupoux, and Yoav Goldberg. 2016. Assessing the ability of LSTMs to learn syntax-sensitive dependencies. Transactions of the ACL.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Learning structured text representations. Transactions of the Association for Computational Linguistics", |
| "authors": [ |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Mirella", |
| "middle": [], |
| "last": "Lapata", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yang Liu and Mirella Lapata. 2018. Learning struc- tured text representations. Transactions of the Asso- ciation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Synthesis Lectures on Human Language Technologies: Computational Modeling of Narrative", |
| "authors": [ |
| { |
| "first": "Inderjeet", |
| "middle": [], |
| "last": "Mani", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Inderjeet Mani. 2012. Synthesis Lectures on Human Language Technologies: Computational Modeling of Narrative. Morgan Claypool.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "From softmax to sparsemax: A sparse model of attention and multi-label classification", |
| "authors": [ |
| { |
| "first": "Andre", |
| "middle": [], |
| "last": "Martins", |
| "suffix": "" |
| }, |
| { |
| "first": "Ramon", |
| "middle": [], |
| "last": "Astudillo", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of ICML", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andre Martins and Ramon Astudillo. 2016. From soft- max to sparsemax: A sparse model of attention and multi-label classification. In Proceedings of ICML.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Metrics for evaluation of ontology-based information extraction", |
| "authors": [ |
| { |
| "first": "Diana", |
| "middle": [], |
| "last": "Maynard", |
| "suffix": "" |
| }, |
| { |
| "first": "Wim", |
| "middle": [], |
| "last": "Peters", |
| "suffix": "" |
| }, |
| { |
| "first": "Yaoyong", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "CEUR Workshop Proceedings", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diana Maynard, Wim Peters, and Yaoyong Li. 2006. Metrics for evaluation of ontology-based informa- tion extraction. In CEUR Workshop Proceedings.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Optimizing semantic coherence in topic models", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Mimno", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Hanna", |
| "suffix": "" |
| }, |
| { |
| "first": "Edmund", |
| "middle": [], |
| "last": "Wallach", |
| "suffix": "" |
| }, |
| { |
| "first": "Miriam", |
| "middle": [], |
| "last": "Talley", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Leenders", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mccallum", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Mimno, Hanna M Wallach, Edmund Talley, Miriam Leenders, and Andrew McCallum. 2011. Optimizing semantic coherence in topic models. In Proceedings of EMNLP.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Using aspect extraction approaches to generate review summaries and user profiles", |
| "authors": [ |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Mitcheltree", |
| "suffix": "" |
| }, |
| { |
| "first": "Skyler", |
| "middle": [], |
| "last": "Wharton", |
| "suffix": "" |
| }, |
| { |
| "first": "Avneesh", |
| "middle": [], |
| "last": "Saluja", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christopher Mitcheltree, Skyler Wharton, and Avneesh Saluja. 2018. Using aspect extraction approaches to generate review summaries and user profiles. In Pro- ceedings of NAACL.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Sparse coding with an overcomplete basis set: A strategy employed by v1? Vision Research", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Bruno", |
| "suffix": "" |
| }, |
| { |
| "first": "David J", |
| "middle": [], |
| "last": "Olshausen", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Field", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bruno A Olshausen and David J Field. 1997. Sparse coding with an overcomplete basis set: A strategy employed by v1? Vision Research.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Pytorch: An imperative style, high-performance deep learning library", |
| "authors": [ |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Paszke", |
| "suffix": "" |
| }, |
| { |
| "first": "Sam", |
| "middle": [], |
| "last": "Gross", |
| "suffix": "" |
| }, |
| { |
| "first": "Francisco", |
| "middle": [], |
| "last": "Massa", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Lerer", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Bradbury", |
| "suffix": "" |
| }, |
| { |
| "first": "Gregory", |
| "middle": [], |
| "last": "Chanan", |
| "suffix": "" |
| }, |
| { |
| "first": "Trevor", |
| "middle": [], |
| "last": "Killeen", |
| "suffix": "" |
| }, |
| { |
| "first": "Zeming", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Natalia", |
| "middle": [], |
| "last": "Gimelshein", |
| "suffix": "" |
| }, |
| { |
| "first": "Luca", |
| "middle": [], |
| "last": "Antiga", |
| "suffix": "" |
| }, |
| { |
| "first": "Alban", |
| "middle": [], |
| "last": "Desmaison", |
| "suffix": "" |
| }, |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Kopf", |
| "suffix": "" |
| }, |
| { |
| "first": "Edward", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zachary", |
| "middle": [], |
| "last": "Devito", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of NeurIPS", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Te- jani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. 2019. Pytorch: An imperative style, high-performance deep learn- ing library. In Proceedings of NeurIPS.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Sceneskim: Searching and browsing movies using synchronized captions, scripts and plot summaries", |
| "authors": [ |
| { |
| "first": "Amy", |
| "middle": [], |
| "last": "Pavel", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [ |
| "B" |
| ], |
| "last": "Goldman", |
| "suffix": "" |
| }, |
| { |
| "first": "Bj\u00f6rn", |
| "middle": [], |
| "last": "Hartmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Maneesh", |
| "middle": [], |
| "last": "Agrawala", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of UIST", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Amy Pavel, Dan B. Goldman, Bj\u00f6rn Hartmann, and Maneesh Agrawala. 2015. Sceneskim: Searching and browsing movies using synchronized captions, scripts and plot summaries. In Proceedings of UIST.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Glove: Global vectors for word representation", |
| "authors": [ |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Pennington", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeffrey Pennington, Richard Socher, and Christopher D. Manning. 2014. Glove: Global vectors for word rep- resentation. In Proceedings of EMNLP.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Cluscite: Effective citation recommendation by information network-based clustering", |
| "authors": [ |
| { |
| "first": "Xiang", |
| "middle": [], |
| "last": "Ren", |
| "suffix": "" |
| }, |
| { |
| "first": "Jialu", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiao", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Urvashi", |
| "middle": [], |
| "last": "Khandelwal", |
| "suffix": "" |
| }, |
| { |
| "first": "Quanquan", |
| "middle": [], |
| "last": "Gu", |
| "suffix": "" |
| }, |
| { |
| "first": "Lidan", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiawei", |
| "middle": [], |
| "last": "Han", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of KDD", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiang Ren, Jialu Liu, Xiao Yu, Urvashi Khandelwal, Quanquan Gu, Lidan Wang, and Jiawei Han. 2014. Cluscite: Effective citation recommendation by in- formation network-based clustering. In Proceedings of KDD.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Does string-based neural mt learn source syntax?", |
| "authors": [ |
| { |
| "first": "Xing", |
| "middle": [], |
| "last": "Shi", |
| "suffix": "" |
| }, |
| { |
| "first": "Inkit", |
| "middle": [], |
| "last": "Padhi", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Knight", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xing Shi, Inkit Padhi, and Kevin Knight. 2016. Does string-based neural mt learn source syntax? In Pro- ceedings of EMNLP.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Recursive deep models for semantic compositionality over a sentiment treebank", |
| "authors": [ |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Perelygin", |
| "suffix": "" |
| }, |
| { |
| "first": "Jean", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Chuang", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Potts", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Richard Socher, Alex Perelygin, Jean Wu, Jason Chuang, Christopher D. Manning, Andrew Ng, and Christopher Potts. 2013. Recursive deep models for semantic compositionality over a sentiment tree- bank. In Proceedings of EMNLP.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Linguistically-informed self-attention for semantic role labeling", |
| "authors": [ |
| { |
| "first": "Emma", |
| "middle": [], |
| "last": "Strubell", |
| "suffix": "" |
| }, |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Verga", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Andor", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Weiss", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Mccallum", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Emma Strubell, Patrick Verga, Daniel Andor, David Weiss, and Andrew McCallum. 2018. Linguistically-informed self-attention for semantic role labeling. In Proceedings of EMNLP.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "End-to-end memory networks", |
| "authors": [ |
| { |
| "first": "Sainbayar", |
| "middle": [], |
| "last": "Sukhbaatar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| }, |
| { |
| "first": "Rob", |
| "middle": [], |
| "last": "Fergus", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of NeurIPS", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sainbayar Sukhbaatar, arthur szlam, Jason Weston, and Rob Fergus. 2015. End-to-end memory networks. In Proceedings of NeurIPS.", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "GLUE: A multi-task benchmark and analysis platform for natural language understanding", |
| "authors": [ |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Amanpreet", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "Julian", |
| "middle": [], |
| "last": "Michael", |
| "suffix": "" |
| }, |
| { |
| "first": "Felix", |
| "middle": [], |
| "last": "Hill", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel", |
| "middle": [ |
| "R" |
| ], |
| "last": "Bowman", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of ICLR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R. Bowman. 2019. GLUE: A multi-task benchmark and analysis plat- form for natural language understanding. In Pro- ceedings of ICLR.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "Wsabie: Scaling up to large vocabulary image annotation", |
| "authors": [ |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| }, |
| { |
| "first": "Samy", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "Nicolas", |
| "middle": [], |
| "last": "Usunier", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of IJCAI", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jason Weston, Samy Bengio, and Nicolas Usunier. 2011. Wsabie: Scaling up to large vocabulary im- age annotation. In Proceedings of IJCAI.", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "A broad-coverage challenge corpus for sentence understanding through inference", |
| "authors": [ |
| { |
| "first": "Adina", |
| "middle": [], |
| "last": "Williams", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikita", |
| "middle": [], |
| "last": "Nangia", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel", |
| "middle": [], |
| "last": "Bowman", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adina Williams, Nikita Nangia, and Samuel Bowman. 2018. A broad-coverage challenge corpus for sen- tence understanding through inference. In Proceed- ings of NAACL.", |
| "links": null |
| }, |
| "BIBREF45": { |
| "ref_id": "b45", |
| "title": "Hierarchical attention networks for document classification", |
| "authors": [ |
| { |
| "first": "Zichao", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Diyi", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodong", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Smola", |
| "suffix": "" |
| }, |
| { |
| "first": "Eduard", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zichao Yang, Diyi Yang, Chris Dyer, Xiaodong He, Alex Smola, and Eduard Hovy. 2016. Hierarchical attention networks for document classification. In Proceedings of NAACL.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "text": "A portion of the screenplay for Pulp Fiction, annotated with the common scene components.", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF1": { |
| "num": null, |
| "text": "The architecture of our script encoder, largely following the structure in Fig. 1.", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF2": { |
| "num": null, |
| "text": "Scene embedding for loss calculation: u t Scene embeddings to interpret: v t Descriptors: R Predictor X Predicted descriptor weights: o t Bag of Words with Attention Scene encoder Predicted reconstruction of scene embedding as a weighted mixture of descriptors: w t Minimize reconstruction error Figure 3: A pictorial representation of the descriptor model.", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF3": { |
| "num": null, |
| "text": "F1 score of various tag attributes as a function of the similarity threshold percentile.", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF4": { |
| "num": null, |
| "text": "4: Examples of retrieved descriptors. Trajectories for \"Violence\", \"Military\", and \"Residential\" are shown in Fig. 5.", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF5": { |
| "num": null, |
| "text": "Descriptor Trajectories for Pearl Harbor, Pretty Woman, and Pulp Fiction. The y-axis is a smoothed and rescaled descriptor weight, i.e. o t in Eq. 6. Events: (A) Attack on Pearl Harbor begins (B) Rising tension at the equestrian club and (C) Confrontation at the pawn shop. Word clusters corresponding to each descriptor are in", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "TABREF2": { |
| "text": "Post-processed version of Fig. 1.", |
| "num": null, |
| "content": "<table/>", |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF5": { |
| "text": "", |
| "num": null, |
| "content": "<table/>", |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF7": { |
| "text": "", |
| "num": null, |
| "content": "<table/>", |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF9": { |
| "text": "Examples of Scriptbase-J tag attributes, tag values, and a logline, for the film \"Pulp Fiction\".", |
| "num": null, |
| "content": "<table><tr><td>Tag</td><td colspan=\"2\">Internal Scriptbase-J</td></tr><tr><td>Genre</td><td>9</td><td>31</td></tr><tr><td>Mood</td><td>65</td><td>18</td></tr><tr><td>Attitude</td><td>-</td><td>8</td></tr><tr><td>Plot</td><td>82</td><td>101</td></tr><tr><td>Place</td><td>-</td><td>24</td></tr><tr><td>Flag</td><td>-</td><td>6</td></tr></table>", |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF10": { |
| "text": "The number of distinct tag values for each tag attribute across the two datasets. Cardinalities for Scriptbase-J tag attributes are identical to Gorinski and Lapata (2018) except for the removal of one mood tag value when filtering for erroneously preprocessed scripts.", |
| "num": null, |
| "content": "<table><tr><td>Tag</td><td>Avg. #tags/script</td><td>Min #scripts/tag</td><td>Max #scripts/tag</td></tr><tr><td>Genre</td><td>1.74</td><td>17</td><td>347</td></tr><tr><td>Mood</td><td>3.29</td><td>15</td><td>200</td></tr><tr><td>Plot</td><td>2.50</td><td>15</td><td>73</td></tr></table>", |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF12": { |
| "text": "Examples of closely related and unrelated tag values in the Scriptbase-J tag set.", |
| "num": null, |
| "content": "<table/>", |
| "type_str": "table", |
| "html": null |
| } |
| } |
| } |
| } |