| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T07:12:08.222610Z" |
| }, |
| "title": "Neural End-to-end Coreference Resolution for German in Different Domains", |
| "authors": [ |
| { |
| "first": "Fynn", |
| "middle": [], |
| "last": "Schr\u00f6der", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Language Technology Group", |
| "institution": "Universit\u00e4t Hamburg", |
| "location": { |
| "country": "Germany" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Hans", |
| "middle": [ |
| "Ole" |
| ], |
| "last": "Hatzel", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Language Technology Group", |
| "institution": "Universit\u00e4t Hamburg", |
| "location": { |
| "country": "Germany" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Biemann", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Language Technology Group", |
| "institution": "Universit\u00e4t Hamburg", |
| "location": { |
| "country": "Germany" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "We apply neural coreference resolution to German, surpassing the previous state-of-theart performance by a wide margin of 10-30 points F1 across three established datasets for German. This is achieved by a neural end-to-end approach, training contextual wordembeddings jointly with mention and entity similarity scores. We explore the impact of various parameters such as language models, pretraining and computational limits with respect to German data. In an effort to support datasets representing the domains of both news and literature, we make use of two distinct model architectures: a mention linking-based and an incremental entity-based approach that should scale to very long documents such as literary works. Our code and ready-to-use models are publicly available. * denotes equal contribution [Alice] 1 was not a bit hurt, and [she] 1 jumped up on to [her] 1 feet in a moment: [she] 1 looked up, but it was all dark overhead; before [her] 1 was [another long passage ] 2 , and [the White Rabbit ] 3 was still in sight, hurrying down [it] 2 .", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "We apply neural coreference resolution to German, surpassing the previous state-of-theart performance by a wide margin of 10-30 points F1 across three established datasets for German. This is achieved by a neural end-to-end approach, training contextual wordembeddings jointly with mention and entity similarity scores. We explore the impact of various parameters such as language models, pretraining and computational limits with respect to German data. In an effort to support datasets representing the domains of both news and literature, we make use of two distinct model architectures: a mention linking-based and an incremental entity-based approach that should scale to very long documents such as literary works. Our code and ready-to-use models are publicly available. * denotes equal contribution [Alice] 1 was not a bit hurt, and [she] 1 jumped up on to [her] 1 feet in a moment: [she] 1 looked up, but it was all dark overhead; before [her] 1 was [another long passage ] 2 , and [the White Rabbit ] 3 was still in sight, hurrying down [it] 2 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Coreference resolution is the task of resolving text spans in documents that refer to the same entities. These are grouped into mention-clusters with each cluster representing one entity. Figure 1 shows coreference annotations on a literary text with different entities being denoted by both subscripts and colors. Tasks such as question answering (Morton, 1999) or text summarization (Steinberger et al., 2007) can rely on coreference resolution as part of the language processing pipeline. Bamman et al. (2014) demonstrated that coreference resolution is also applicable to literary analysis. The task has recently seen large improvements as systems moved from rule-based (e.g. Roesiger and Kuhn, 2016; Lee et al., 2011) to neural approaches (e.g. Lee et al., 2017; Joshi et al., 2019) . This advancement from a CoNLL-F1-score of 57.8, achieved by a rule-based system in the original CoNLL-2012 shared task (Pradhan et al., 2012) , to 67.2 in the Figure 1 : Coreference gold annotations for \"Alice's Adventures in Wonderland\" (annotations from Bamman et al., 2020) first end-to-end neural system (Lee et al., 2017) has shown that neural systems are key to state-of-theart performance.", |
| "cite_spans": [ |
| { |
| "start": 348, |
| "end": 362, |
| "text": "(Morton, 1999)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 385, |
| "end": 411, |
| "text": "(Steinberger et al., 2007)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 492, |
| "end": 512, |
| "text": "Bamman et al. (2014)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 680, |
| "end": 704, |
| "text": "Roesiger and Kuhn, 2016;", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 705, |
| "end": 722, |
| "text": "Lee et al., 2011)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 750, |
| "end": 767, |
| "text": "Lee et al., 2017;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 768, |
| "end": 787, |
| "text": "Joshi et al., 2019)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 909, |
| "end": 931, |
| "text": "(Pradhan et al., 2012)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 1046, |
| "end": 1066, |
| "text": "Bamman et al., 2020)", |
| "ref_id": null |
| }, |
| { |
| "start": 1098, |
| "end": 1116, |
| "text": "(Lee et al., 2017)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 188, |
| "end": 196, |
| "text": "Figure 1", |
| "ref_id": null |
| }, |
| { |
| "start": 949, |
| "end": 957, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Coreference resolution on German using neural networks has received little attention. There has, to our knowledge, no work been reported on German news datasets using neural networks yet. This work is also the first to use cross-task learning to improve performance on German literary datasets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We apply and adapt exiting approaches to coreference on German, making our code and models publicaly available. 1 There are two approaches to neural coreference resolution that we consider: A mention-linking-based and an entity-linking-based approach. Both have an initial mention proposal step, finding text spans that are likely to represent mentions. In mention-linking approaches, out of the cross-products of mentions, those mentions with the highest likelihood are considered. Each such mention is connected to its highest-scoring antecedent with transitively connected mentions forming entities.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The entity-representation-based approach also involves the initial mention proposal step. However, rather then creating links on a per-mention basis, initial mentions are considered to be entity representations, with each subsequent mention be-ing compared to existing entity representations and assigned to those that match them best. This way memory usage and computational effort can be reduced, as it is proportional to the number of entities, rather than the square of the number of mentions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Relevant prior work can be put into two distinct categories: (a) Neural, state-of-the-art coreference resolution developed primarily on English (b) Coreference resolution applied to German.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Most neural coreference resolution models perform a ranking of antecedents based on the pairwise scores of mention candidates (Wiseman et al., 2015; Clark and Manning, 2016a; Lee et al., 2017) , at this only relying on local decisions that may not be globally optimal to form coherent entities . This general architecture has been improved on in multiple ways.", |
| "cite_spans": [ |
| { |
| "start": 126, |
| "end": 148, |
| "text": "(Wiseman et al., 2015;", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 149, |
| "end": 174, |
| "text": "Clark and Manning, 2016a;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 175, |
| "end": 192, |
| "text": "Lee et al., 2017)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "To address the issue of global optimization, Clark and Manning (2016b) and Wiseman et al. (2016) create entity representations during the ranking step. ; Kantor and Globerson (2019) iteratively refine mention representations with associated antecedent information, performing what they refer to as higher-order inference.", |
| "cite_spans": [ |
| { |
| "start": 45, |
| "end": 70, |
| "text": "Clark and Manning (2016b)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 75, |
| "end": 96, |
| "text": "Wiseman et al. (2016)", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 154, |
| "end": 181, |
| "text": "Kantor and Globerson (2019)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "While the end-to-end coreference model of Lee et al. (2017) uses a bidirectional LSTM (Hochreiter and Schmidhuber, 1997) to produce span representations, see a 3.2 F1 score increase on the English CoNLL-2012 shared task by additionally using ELMo (Peters et al., 2018) embeddings. also modify the model to perform coarse-to-fine antecedent pruning enabling an efficient computation and potentially allowing the processing of longer documents. Joshi et al. (2019) and Kantor and Globerson (2019) improve upon this by using BERT (Devlin et al., 2019) embeddings instead of the LSTM-based representations and gain another 3.3 F1 points.", |
| "cite_spans": [ |
| { |
| "start": 86, |
| "end": 120, |
| "text": "(Hochreiter and Schmidhuber, 1997)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 247, |
| "end": 268, |
| "text": "(Peters et al., 2018)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 443, |
| "end": 462, |
| "text": "Joshi et al. (2019)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 467, |
| "end": 494, |
| "text": "Kantor and Globerson (2019)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 527, |
| "end": 548, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Recently, Joshi et al. (2020) presented a model optimized for span representations named Span-BERT and saw another 2.5 point increase in F1 score, which has been reproduced by Xu and Choi (2020) . Wu et al. (2020) have taken a different approach to coreference resolution; they outperform previous state of the art by 3.5 F1 points in part due to the ability to recover missed mentions by framing the task as a question-answering problem. Toshniwal et al. (2020) ; Xia et al. (2020) both introduce incremental approaches to coreference resolution. Instead of comparing mention pairs like Lee et al. (2017) , they compare mentions with entity representations, with the entity representations being produced from a linear combination of their mentions. Both approaches work by iteratively processing all mentions and scoring each mention with regard to a set of entities; as a result, an evaluation of the full cross-product of mentions is not necessary. The two approaches differ slightly in how they handle the introduction of new entities.", |
| "cite_spans": [ |
| { |
| "start": 10, |
| "end": 29, |
| "text": "Joshi et al. (2020)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 176, |
| "end": 194, |
| "text": "Xu and Choi (2020)", |
| "ref_id": "BIBREF39" |
| }, |
| { |
| "start": 197, |
| "end": 213, |
| "text": "Wu et al. (2020)", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 439, |
| "end": 462, |
| "text": "Toshniwal et al. (2020)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 465, |
| "end": 482, |
| "text": "Xia et al. (2020)", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 588, |
| "end": 605, |
| "text": "Lee et al. (2017)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "For coreference resolution on German texts, published work predates the age of neural networks in natural language processing. The CorZu system (Klenner and Tuggener, 2011; Tuggener and Klenner, 2014 ) is a rule-based incremental entitymention model that has been extended with Markov Logic Networks for the antecedent selection.", |
| "cite_spans": [ |
| { |
| "start": 144, |
| "end": 172, |
| "text": "(Klenner and Tuggener, 2011;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 173, |
| "end": 199, |
| "text": "Tuggener and Klenner, 2014", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Roesiger and Kuhn (2016) adapted the English system of Bj\u00f6rkelund and Kuhn (2014) to German. A directed tree where each node represents a mention is used to model the coreferences in a document. For determining antecedents, both local and non-local handcrafted features are employed. They created the current state-of-the-art approach for German news datasets, evaluating their system on the SemEval-2010 shared task and on version 10 of the T\u00fcBa-D/Z dataset.", |
| "cite_spans": [ |
| { |
| "start": 55, |
| "end": 81, |
| "text": "Bj\u00f6rkelund and Kuhn (2014)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The domain of literature has, for both German and English, received increased attention in recent years with regard to coreference resolution. Roesiger et al. (2018) considered the domain specific challenges and phenomena of literature. Bamman et al. (2020) released an English dataset and Krug et al. (2018) released a German dataset (see Section 3.2 for details). While Krug (2020) performed coreference resolution on German literary data, Toshniwal et al. (2020) used the English dataset. Krug (2020) compare various approaches to coreference resolution on German historic novels using the DROC dataset (Krug et al., 2018) . Their bestperforming system in a gold-mention scenario uses a rule-based Stanford Sieve approach (Lee et al., 2011) , iteratively applying rules starting from the most precise rule, going to less precise rules. When mention spans are generated by the model, the endto-end neural network, based on the approach by Lee et al. (2017) , performs about on par with the rule-based systems in conjunction with preprocessing pipelines.", |
| "cite_spans": [ |
| { |
| "start": 143, |
| "end": 165, |
| "text": "Roesiger et al. (2018)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 237, |
| "end": 257, |
| "text": "Bamman et al. (2020)", |
| "ref_id": null |
| }, |
| { |
| "start": 290, |
| "end": 308, |
| "text": "Krug et al. (2018)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 442, |
| "end": 465, |
| "text": "Toshniwal et al. (2020)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 492, |
| "end": 503, |
| "text": "Krug (2020)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 606, |
| "end": 625, |
| "text": "(Krug et al., 2018)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 725, |
| "end": 743, |
| "text": "(Lee et al., 2011)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 941, |
| "end": 958, |
| "text": "Lee et al. (2017)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Evaluation of coreference data presents a challenge, different proposed metrics emphasise differ-ent aspects of a model's performance. An average of the three metrics M U C, B 3 , and CEAF \u03c6 4 has been used in the CoNLL-2012 task (Pradhan et al., 2012) . As these metrics are widely used we focus on them for reporting our results, including an average of the three, the CoNLL-F1 score.", |
| "cite_spans": [ |
| { |
| "start": 230, |
| "end": 252, |
| "text": "(Pradhan et al., 2012)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "3 German Coreference Datasets", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The standard corpus for coreference resolution in German is T\u00fcBa-D/Z (Telljohann et al., 2017; Naumann and M\u00f6ller, 2006) , a manually annotated collection of newspaper articles released in multiple versions that incrementally add more documents. It was also used as the data source for the German part of the SemEval-2010 shared task on coreference resolution (Recasens et al., 2010) .", |
| "cite_spans": [ |
| { |
| "start": 69, |
| "end": 94, |
| "text": "(Telljohann et al., 2017;", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 95, |
| "end": 120, |
| "text": "Naumann and M\u00f6ller, 2006)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 360, |
| "end": 383, |
| "text": "(Recasens et al., 2010)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "News", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "To be comparable with previous work, we chose to use SemEval-2010 and T\u00fcBa-D/Z release 10.0 instead of the marginally larger 11.0 for most of our experiments. As there is no official split for the T\u00fcBa-D/Z, we use the same splits as previous work (Roesiger and Kuhn, 2016 Compared to the standard English coreference corpus, OntoNotes (Weischedel et al., 2013) , used in the CoNLL-2012 shared task on coreference resolution (Pradhan et al., 2012), T\u00fcBa-D/Z neither contains different genres of texts nor additional metadata such as speaker information. Regarding statistics such as average mentions per entity, mentions/sentence length and tokens/sentences/entities per document, German T\u00fcBa-D/Z 10.0 and English OntoNotes 5.0 are remarkably similar.", |
| "cite_spans": [ |
| { |
| "start": 247, |
| "end": 271, |
| "text": "(Roesiger and Kuhn, 2016", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 335, |
| "end": 360, |
| "text": "(Weischedel et al., 2013)", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "News", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The DROC dataset (Krug et al., 2018) contains 90 coreference annotated literary documents where each document comprises one chapter with an average length of 4369.49 tokens. We use the splits established by Krug (2020) , i.e. 58 training, 14 development and 18 test documents. There is a total of 51 797 mentions in 5365 clusters, 2409 of these are singleton clusters. As a result, while 45% of 2 for corpus statistics, see Table 9 in the appendix Table 1 : Inter-annotator F1 scores for DROC as calculated using the scorer by Pradhan et al. (2012) based on the individual annotator's data by Krug et al. (2018) .", |
| "cite_spans": [ |
| { |
| "start": 17, |
| "end": 36, |
| "text": "(Krug et al., 2018)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 207, |
| "end": 218, |
| "text": "Krug (2020)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 593, |
| "end": 611, |
| "text": "Krug et al. (2018)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 424, |
| "end": 431, |
| "text": "Table 9", |
| "ref_id": "TABREF14" |
| }, |
| { |
| "start": 448, |
| "end": 455, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Literature", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Mention-F1 MUC-F1 B 3 -F1 CEAF \u03c64 -F1 CoNLL-F1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Literature", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "clusters are singleton clusters, only 4.7% of mentions are singletons. Our calculations for the performance of human annotators on the subset of DROC are listed in Table 1 , providing an upper bound for our performance expectations. In contrast to other datasets (e.g. Bamman et al., 2020) , only mention heads are annotated, rather than whole nominal phrases. This means that in the sentence, \"and [the driver] was none other than [that cursed Englishman]\" (from the dataset by Bamman et al. 2020\"The Scarlet Pimpernel\"), only the spans \"Englishman\" and \"driver\" would be annotated as coreferring instead. Thus, only spans up to a short length need to be considered in the mention proposal step. DROC also differentiates itself from other datasets in that it only annotates references to characters. More generally, literary data, when compared to news texts, comes with the added challenge of document length. Longer documents tend to come with more mentions, DROC, for example, contains an average of 575.52 mentions per document whereas SemEval only has an average of 97.79. In general, increased document length lead to longer processing time, larger computational effort and higher memory requirements.", |
| "cite_spans": [ |
| { |
| "start": 269, |
| "end": 289, |
| "text": "Bamman et al., 2020)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 164, |
| "end": 171, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Literature", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "In this section, we describe our German coreference resolution models in detail. We build on the widely adapted neural end-to-end architecture developed by Lee et al. (2017 , improved by Joshi et al. (2019) and re-implemented in PyTorch (Paszke et al., 2019) by Xu and Choi (2020) . Although the CorefQA system (Wu et al., 2020) is currently the top-performing system for English, we chose to not build upon it because it is more complex and requires vastly more computational resources than our chosen approach.", |
| "cite_spans": [ |
| { |
| "start": 156, |
| "end": 172, |
| "text": "Lee et al. (2017", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 187, |
| "end": 206, |
| "text": "Joshi et al. (2019)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 237, |
| "end": 258, |
| "text": "(Paszke et al., 2019)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 262, |
| "end": 280, |
| "text": "Xu and Choi (2020)", |
| "ref_id": "BIBREF39" |
| }, |
| { |
| "start": 311, |
| "end": 328, |
| "text": "(Wu et al., 2020)", |
| "ref_id": "BIBREF36" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The general idea of our models is to first detect mentions and then to link them. Each document is processed individually during both training and inference; Figure 2 visualizes a single document being processed by both model variants. First, contextual ELECTRA (Clark et al., 2020) Figure 2 : Conceptual visualization of our two end-to-end model variants processing an example document. Both models are based on the same mention proposal step. While the incremental model operates on an ever-growing set of entities, the coarse-to-fine model performs one comparison on the cross product of all mentions. Dark green color indicates a good match between mention and its assignment candidate, whereas black squares indicate that, due to filtering, no scoring was performed. All values are manually chosen for illustration purposes.", |
| "cite_spans": [ |
| { |
| "start": 262, |
| "end": 282, |
| "text": "(Clark et al., 2020)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 158, |
| "end": 166, |
| "text": "Figure 2", |
| "ref_id": null |
| }, |
| { |
| "start": 283, |
| "end": 291, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": "4" |
| }, |
| { |
| "text": "mention spans up to a configurable length are enumerated. Mention embeddings are created, containing start and end token embeddings and the attention-weighted average of all span tokens. In contrast to the English models, our models contain neither genre nor speaker embeddings as the German datasets do not supply this information. A na\u00efve approach of comparing each mention candidate with every other to find links between them raises computational issues, quickly becoming infeasible to compute as it requires O(M 2 ) comparisons for M = max mention length \u2022 |D| mention candidates, for a document D where |D| is the document length in word-piece tokens. To reduce computational effort over a na\u00efve approach to find the best antecedent for each mention, we employ two established strategies: A coarse-to-fine and an incremental approach, with the incremental approach being able to handle documents of arbitrary length with limited memory.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Our model is based on the implementation by Xu and Choi (2020) . For each mention span, the model learns a distribution over its antecedents based on how likely both individual spans are to be valid mentions and how likely they to refer to the same entity. Two pruning steps are used to make this mention linking computationally feasible.", |
| "cite_spans": [ |
| { |
| "start": 44, |
| "end": 62, |
| "text": "Xu and Choi (2020)", |
| "ref_id": "BIBREF39" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Coarse-to-fine", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "To reduce the number of mentions, all mention embeddings are scored individually with a feedforward neural network (FFNN). For each docu-ment D only the top n = min(4096, 0.4\u2022|D|) mentions are kept after pruning. Instead of performing a pairwise comparison of all N mentions, only a fraction is used. Thus, removing obvious non-mentions and limiting the complexity to O(n 2", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Coarse-to-fine", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "N 2 ), a step that we refer to as mention filtering.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Coarse-to-fine", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "In the coarse antecedent pruning step, the pairwise similarity scores of the remaining mention embeddings are summed with the individual mention scores. A subsequent fine-grained ranking is performed with the top a = 64 antecedents per mention; to this effect, pairwise mention-antecedent embeddings consisting of mention, antecedent and similarity embedding are created. These embeddings are scored with a FFNN and combined with scores from the coarse step resulting in scores for the top antecedents per mention. We do not use so-called higher-order inference as this effectively doubles the computational cost of the fine-grained antecedent scoring without improving the quality according to Xu and Choi (2020) .", |
| "cite_spans": [ |
| { |
| "start": 695, |
| "end": 713, |
| "text": "Xu and Choi (2020)", |
| "ref_id": "BIBREF39" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Coarse-to-fine", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "During training, the model learns to optimize the marginal log-likelihood of possibly correct antecedents for each mention, i.e. for each antecedent the score should be 1 if mention and antecedent belong to the same gold entity, 0 otherwise.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Coarse-to-fine", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "During inference, an undirected graph of mentions is created by connecting each mention with its highest-scoring antecedent. In this graph, each connected component of mentions forms an entity.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Coarse-to-fine", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "The general approach of the incremental model follows Xia et al. (2020) and Toshniwal et al. (2020) . Mention filtering is performed as in the courseto-fine model. We process the document iteratively, splitting the document into multiple windows for transformer language model inference. Unlike Toshniwal et al. (2020) but following Xia et al. 2020we reuse all model weights, including both the transformer weights and all task-specific layers.", |
| "cite_spans": [ |
| { |
| "start": 54, |
| "end": 71, |
| "text": "Xia et al. (2020)", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 76, |
| "end": 99, |
| "text": "Toshniwal et al. (2020)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 295, |
| "end": 318, |
| "text": "Toshniwal et al. (2020)", |
| "ref_id": "BIBREF31" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Incremental", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "In a step we call entity assignment, every mention candidate chooses its entity in an iterative fashion. In our standard setup, this is modeled as a classification task with a dynamic number of classes and the initial set of classes, each class representing an entity C 0 = {\u2205}. If, for any mention embedding m being processed, \u2205 is selected as the class, the mention is added as a new class. Entity representations are tracked with R(E n ) being set to m when the n-th entity is added. As a result, after the first mention is processed the set of classes is always extended:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Incremental", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "C 1 = {\u2205, E 0 }. Subsequently, new mentions E", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Incremental", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "x are added iteratively. Whenever any existing E x is selected as the best fitting entity, its representation is updated using an update gate:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Incremental", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "R(E x ) := (1 \u2212 \u03b1)m + \u03b1R(E x ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Incremental", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Training is done by means of cross entropy loss across all existing entities and the new entity class, with the gold class for each entity being its most recently assigned mention gold class. As a result, early in training many entity representations likely contain mentions that, from a gold label perspective, should not belong together. Toshniwal et al. (2020) use teacher forcing to address this issue and thereby reach earlier convergence; we test this approach in our setup, assigning each mention to its gold class for further computations, rather than relying on predicted classes.", |
| "cite_spans": [ |
| { |
| "start": 340, |
| "end": 363, |
| "text": "Toshniwal et al. (2020)", |
| "ref_id": "BIBREF31" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Incremental", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The only way mention candidates can be discarded (either because they are not a mention or because they are singleton mentions) is by means of creating a new entity and never assigning any additional mentions to it; in postprocessing any such singleton entity would be removed, yielding the final output entities. To support detection of singleton mentions, we follow Xia et al. (2020) in adding an additional class representing the discarding of any given entity. In this \"discard\" scenario, singleton mentions are not removed in postprocessing since non-mentions are modeled explicitly. 5 Experiments: News Domain", |
| "cite_spans": [ |
| { |
| "start": 368, |
| "end": 385, |
| "text": "Xia et al. (2020)", |
| "ref_id": "BIBREF38" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Incremental", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We perform preliminary experiments to select the best pre-trained German language model, its best context size and to optimize other hyperparameters. For the main experiments on the news datasets T\u00fcBa-D/Z 10 and SemEval-2010, we train and evaluate our coarse-to-fine model as it is easily capable of processing the typically rather short documents. We use the training, development and test splits as described in Section 3.1. The SemEval dataset contains singletons, but our coarse-to-fine model predicts only clusters of at least two mentions. Following Roesiger and Kuhn (2016), we ignore singletons when scoring our systems' predictions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Incremental", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We evaluated multiple pre-trained language models for our coreference resolution model. As a baseline, we include the multilingual BERT-Base model (in both the cased and uncased variants) by Devlin et al. (2019) . Chan et al. (2020) recently published German BERT and ELECTRA (cased, both base and large) denoted as GBERT / GELECTRA in Table 2 . In addition, we included another ELECTRA model (uncased, base) by German-NLP-Group denoted as GNG ELECTRA 3 . We find that all of the recent German language models perform better than the multilingual BERT. For the base models, ELECTRA outperforms BERT by a substantial margin. Using large models, ELECTRA performs marginally better. Based on the results shown in Table 3 : T\u00fcBa-D/Z 10 development score of coarse-tofine models GNG ELECTRA (base) and GELECTRA (large) with different segment lengths.", |
| "cite_spans": [ |
| { |
| "start": 191, |
| "end": 211, |
| "text": "Devlin et al. (2019)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 214, |
| "end": 232, |
| "text": "Chan et al. (2020)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 452, |
| "end": 453, |
| "text": "3", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 336, |
| "end": 343, |
| "text": "Table 2", |
| "ref_id": "TABREF4" |
| }, |
| { |
| "start": 710, |
| "end": 717, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Pre-trained Language Models", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Following Joshi et al. 2019, we split documents into non-overlapping ELECTRA contexts, evaluating different splits for contexts as shown in Table 3 . While Joshi et al. (2019) show that for English BERT-base/large a segment length of 128/384 is optimal, this does not hold true for our German models and dataset where larger segment lengths perform better. Our results are in line with the intuition that larger context sizes provide more contextual information for any given mention. Thus, we use a segment length of 512 in our models.", |
| "cite_spans": [ |
| { |
| "start": 156, |
| "end": 175, |
| "text": "Joshi et al. (2019)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 140, |
| "end": 147, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "ELECTRA Context Size", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "In general, parameters affecting computational limits have a large impact, all other parameters that we tested had only limited effect. Parameters controlling the pruning (top span ratio, max top spans and max top antecedents) have a strong negative effect when set too low, resulting in too aggressive pruning. Higher values increase evaluation scores with quickly diminishing returns; yet strongly increase computation time and memory. To reduce GPU memory usage and computation time, we reduced the size of all feed-forward neural networks from 3000 used in previous work to 2048 without seeing distinct score changes on the T\u00fcBa-D/Z 10 development set. We also increased the size to 4096, resulting in more memory usage and slower computation, but negligible changes in evaluation performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hyperparameters", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "For the literary dataset (DROC), we explore the use of both model variants. We initialize the incremental model with weights from the coarse-to-fine variant.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments: Literature Domain", |
| "sec_num": "6" |
| }, |
| { |
| "text": "News-Pretrain Singletons 61.66 \u00b1 0.52 59.93 \u00b1 0.33 65.58 \u00b1 0.46 64.26 \u00b1 0.51 Table 4 : The effect of using pre-training on the DROC coarse-to-fine model on data with and without singletons. All results were averaged over 5 runs and the standard deviation is given.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 77, |
| "end": 84, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "CoNLL-F1", |
| "sec_num": null |
| }, |
| { |
| "text": "Given the relatively small size of the DROC dataset, we explore the impact of pretrained weights from the news tasks. We expected that while the different approaches to mention annotation (heads or entire noun phrases) would somewhat limit applicability of existing weights they would still lead to an improvement. Table 4 shows the development set results for the DROC dataset, with the same set of initial weights that was pretrained on T\u00fcBa-D/Z 10 being used for all of our runs. Standard deviations for the CoNLL-F1 scores are given, based on five runs with different random initializations. All layer weights, including task specific ones as well as language model ones were reused. The experiment was repeated for a variant of the DROC dataset with all singleton mentions removed.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 315, |
| "end": 322, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Coarse-to-fine Model", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "Using Welch's t-test we can infer that the pretrained version does, on average, perform better for the no singleton variant (p < 0.005). As a result we will use the news-pretrained model variant in all our further experiments. This finding is also supported by the recent publication by (Xia and Durme, 2021) which establishes that, especially for short datasets, using pretrained weights is beneficial. We are unsure if further significant improvements could be gained by pre-training on additional datasets, for example GerDraCor (Pagel and Reiter, 2020) , given that T\u00fcBa-D/Z is already a large dataset. Table 5 shows how two configuration parameters affect the coarse-to-fine model's performance. The two options enable different features, where \"segment info\" describes how many BERT segments lie between the current and candidate mention while \"token info\" describes the token distance from the candidate mention to the document start. Further, \"token info\" encodes the length of the candidate mention span. This experiment was performed as we saw a recency bias in terms of connecting mentions in our early result explorations (see Section 7), an effect that could be caused by these distance-based features. On average, the variant without token distance representation performs significantly better than the one with both features enabled (p < 0.001). We attribute this to a greater mention recency bias that is encouraged by the additional features.", |
| "cite_spans": [ |
| { |
| "start": 287, |
| "end": 308, |
| "text": "(Xia and Durme, 2021)", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 532, |
| "end": 556, |
| "text": "(Pagel and Reiter, 2020)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 607, |
| "end": 614, |
| "text": "Table 5", |
| "ref_id": "TABREF7" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Coarse-to-fine Model", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "The memory usage of the coarse-to-fine approach, while not prohibitive for the DROC dataset, will prevent its application to full length literary documents. Table 5 illustrates the impact of the same configuration parameters that were used for the coarse-tofine model. The impact of the parameters appears to be lessened in the incremental case.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 157, |
| "end": 164, |
| "text": "Table 5", |
| "ref_id": "TABREF7" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Incremental Model", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "Unsurprisingly, due to the possibility of handling singleton mentions, Table 6 clearly shows that the discard functionality is critical to model performance. Teacher forcing appears to have a negative impact on performance; this does come as a surprise, but while convergence early in training was faster, the final results were slightly worse.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 71, |
| "end": 78, |
| "text": "Table 6", |
| "ref_id": "TABREF8" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Incremental Model", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "We seek to analyze how well incremental models fare as document length increases. To this end, we split DROC at the nearest sentence boundary into sub documents that are no longer than 512, Table 7 : Results of our coarse-to-fine models and previous systems on the test set of T\u00fcBa-D/Z 10 and SemEval-2010 (without singletons). IMS HotCoref and CorZu scores as reported by Roesiger and Kuhn (2016) . Full metrics in Table 10 in the appendix.", |
| "cite_spans": [ |
| { |
| "start": 373, |
| "end": 397, |
| "text": "Roesiger and Kuhn (2016)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 190, |
| "end": 197, |
| "text": "Table 7", |
| "ref_id": null |
| }, |
| { |
| "start": 416, |
| "end": 424, |
| "text": "Table 10", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Impact of Document Length", |
| "sec_num": "6.3" |
| }, |
| { |
| "text": "1024 and 2048 tokens. Previous work (Krug, 2020; Joshi et al., 2019) has established that, with longer documents, the performance of coreference systems drops. This can be interpreted as the inherent difficulty of the coreference task growing with document length. Figure 3 shows that for longer documents the gap in performance between the model variants increases slightly.", |
| "cite_spans": [ |
| { |
| "start": 36, |
| "end": 48, |
| "text": "(Krug, 2020;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 49, |
| "end": 68, |
| "text": "Joshi et al., 2019)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 265, |
| "end": 273, |
| "text": "Figure 3", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Impact of Document Length", |
| "sec_num": "6.3" |
| }, |
| { |
| "text": "Our neural coarse-to-fine models outperform the previous state of the art by a large margin on both SemEval-2010 (+25.85 F1) and T\u00fcBa-D/Z (+30.25 F1) as shown in Table 7 . In fact, even if the other systems are allowed to use gold mentions, our models still outperform them by more than 10 F1 points. Using ELECTRA large for contextual embeddings yields a small improvement over the base model (+1.58 F1 / +1.92 F1). Figure 4 shows an example of our system's prediction on an unseen document.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 162, |
| "end": 169, |
| "text": "Table 7", |
| "ref_id": null |
| }, |
| { |
| "start": 417, |
| "end": 425, |
| "text": "Figure 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results & Error Analysis", |
| "sec_num": "7" |
| }, |
| { |
| "text": "We manually analyze the predictions of our coarse-to-fine model and find that it generally produces accurate coreference links both locally and document-wide. While entity assignment of mentions, identified in both prediction and gold data, is typically correct, missed and added mentions are more frequent errors. We assume that one reason is a contradicting training signal, i.e. while some mentions are annotated as such in the gold data, others are not because they are singletons or were missed in the annotation process. Our incremental model on data including singletons outperforms the existing state of the art for DROC by 11.6 F1 points (see Table 8 ). Said results were achieved in a setup comparable to ours, with no gold information such as speakers or entity spans being used, except in the case of their end-to-end neural network (E2E-NN), where direct speech and speaker information were used.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 652, |
| "end": 659, |
| "text": "Table 8", |
| "ref_id": "TABREF11" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results & Error Analysis", |
| "sec_num": "7" |
| }, |
| { |
| "text": "We manually evaluate our model on entire literary texts. While we find local coreference relationships to be surprisingly accurate, when taking a (b) Model without token distance feature Figure 5 : We observe a recency bias that appears to, in this case, be fixed by not including an explicit token distance feature. The term \"Gro\u00dfmutter\" (grandmother) is linked to the term \"Mutter\" (mother). 4 more global view, some of our model's weaknesses are exposed. When searching the token \"Holmes\" in the German translation of \"The Hound of the Baskervilles\" 5 which should always refer to the same character we find the 212 tokens to occur in 31 different clusters with 4 mentions being assigned to no cluster. Our observation is that this often occurs after a long section of text without explicit mentions of the name, in fact the average distance from one mention of Holmes to the previous is 320.6 tokens whereas it is 655.3 for those cases where a new class is erroneously introduced. We suspect, that this could be attributed to the name taking less prominence in the entity representation after a while. Figure 5a illustrates a recency bias in our model, \"grandmother\" and \"mother\" were erroneously combined into one entity, presumably because the distance between the \"mother\" and \"grandmother\" mentions were very small. On a larger scale this effect can be observable as long sequences of the same cluster forming, an effect that is especially prominent in our incremental models. This observation motivated our experiments with removing distance features (see Table 5 ), resulting in an improved model and, in this case (as seen in Figure 5b ), an improved result. However, this particular model no longer detects \"thing\" (Ding) as a valid mention which could both be a side effect of removing the distance features or an effect of the random initialization and training.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 187, |
| "end": 195, |
| "text": "Figure 5", |
| "ref_id": null |
| }, |
| { |
| "start": 1106, |
| "end": 1115, |
| "text": "Figure 5a", |
| "ref_id": null |
| }, |
| { |
| "start": 1565, |
| "end": 1572, |
| "text": "Table 5", |
| "ref_id": "TABREF7" |
| }, |
| { |
| "start": 1637, |
| "end": 1646, |
| "text": "Figure 5b", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results & Error Analysis", |
| "sec_num": "7" |
| }, |
| { |
| "text": "We apply recent developments in neural architectures for coreference resolution on German data and achieve a substantial improvement over the previous state of the art on all three established German datasets. We conducted experiments with two variants: a coarse-to-fine model suitable for rather short documents, and an incremental model that should scale to long documents. In our analysis we found that while the task of coreference resolution itself becomes more difficult as document sizes increase, the incremental approach scales worse than the coarse-to-fine approach in terms of accuracy. While we found local decisions to be accurate, shortcomings of the incremental model in global consistency and recency bias were explored.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "8" |
| }, |
| { |
| "text": "In future work, we would especially like to address remaining challenges for the processing of long-form literary documents. In spite of the large improvements we achieved, there is still a considerable headroom for coreference resolution, as reflected by a large performance gap between the human baseline of 82.54 F1 and our best model with 64.7 F1 on the DROC dataset. On a more theoretic note, another extension worth pursuing in the future especially for the literary domain is the notion of subjective coreference. As an example, in the fairy tale \"Little Red Riding Hood\" (see Figure 5 ), the girl temporarily perceives a highly plot-relevant coreference between the grandmother and the big bad wolf, which is not reflected in objectivized models. Table 10 : Recall, precision and F1 score on the test set of T\u00fcBa-D/Z 10 and SemEval-2010 (without singletons).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 584, |
| "end": 592, |
| "text": "Figure 5", |
| "ref_id": null |
| }, |
| { |
| "start": 755, |
| "end": 763, |
| "text": "Table 10", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "8" |
| }, |
| { |
| "text": "Our coarse-to-fine (c2f) models use either ELECTRA base or large. IMS HotCoref and CorZu system scores as reported by Roesiger and Kuhn (2016) .", |
| "cite_spans": [ |
| { |
| "start": 118, |
| "end": 142, |
| "text": "Roesiger and Kuhn (2016)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A Appendix", |
| "sec_num": null |
| }, |
| { |
| "text": "https://github.com/uhh-lt/ neural-coref/tree/konvens", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Model description at https://huggingface.co/german-nlp-group/ electra-base-german-uncased", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Text from: https://www.projekt-gutenberg. org/bechstei/maerchen/chap053.html Es war einmal ein gar allerliebstes, niedliches Ding von einen [M\u00e4dchen] 1 , [das] 1 hatte eine [Mutter] 2 und eine [Gro\u00dfmutter] 2 , die waren gar gut und hatten das kleine [Ding] 1 so lieb. Die [Gro\u00dfmutter] 2 absonderlich, [die] 2 wu\u00dfte gar nicht, wie gut sie ' s mit dem [Enkelchen] 1 meinen sollte[...]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://www.projekt-gutenberg.org/ doyle/basker-1/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work was, in part, supported by the DFG through the project \"Evaluating Events in Narrative Theory (EvENT)\" (grants BI 1544/11-1 and GI 1105/3-1) as part of the priority program \"Computational Literary Studies (CLS)\" (SPP 2207). This work was partly supported by the Cluster of Excellence CLICCS (EXC 2037), Universit\u00e4t Hamburg, funded through the German Research Foundation (DFG).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Bisher keine Erkl\u00e4rung f\u00fcr", |
| "authors": [], |
| "year": null, |
| "venue": "Der Eindruck, da\u00df sich die Unf\u00e4lle bei", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "[Bahn-Chef] 1 legt Statistik vor. Bisher keine Erkl\u00e4rung f\u00fcr [das Ungl\u00fcck von [Eschede] 3 ] 2 [Frankfurt] 4 ( taz ) -Der Eindruck, da\u00df sich die Unf\u00e4lle bei [der Bahn ] 5", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Berichterstattung der Medien \" provoziert, erkl\u00e4rte [der Vorstandsvorsitzende", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Die", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "h\u00e4uften, sei nur durch die \" Berichterstattung der Medien \" provoziert, erkl\u00e4rte [der Vorstandsvorsitzende [der Deutschen Bahn AG ] 5 , Johannes Ludewig ( CDU ] 1 ), gestern in [Frankfurt] 4 . Zum bevorstehenden ersten Jahrestag [der ICE-Katastrophe von [Eschede] 3 ] 2 (", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "References David Bamman, Olivia Lewke, and Anya Mansoor. 2020. An annotated dataset of coreference in English literature", |
| "authors": [], |
| "year": null, |
| "venue": "Proceedings of the 12th Language Resources and Evaluation Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "44--54", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Juni ) verwies [Ludewig] 1 auf [die -bahneigene - Statistik ] 6 . [...] References David Bamman, Olivia Lewke, and Anya Mansoor. 2020. An annotated dataset of coreference in En- glish literature. In Proceedings of the 12th Lan- guage Resources and Evaluation Conference, pages 44-54, Marseille, France. European Language Re- sources Association.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "A Bayesian mixed effects model of literary character", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Bamman", |
| "suffix": "" |
| }, |
| { |
| "first": "Ted", |
| "middle": [], |
| "last": "Underwood", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "370--379", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/v1/P14-1035" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Bamman, Ted Underwood, and Noah A. Smith. 2014. A Bayesian mixed effects model of literary character. In Proceedings of the 52nd Annual Meet- ing of the Association for Computational Linguis- tics (Volume 1: Long Papers), pages 370-379, Balti- more, Maryland. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Learning Structured Perceptrons for Coreference Resolution with Latent Antecedents and Non-local Features", |
| "authors": [ |
| { |
| "first": "Anders", |
| "middle": [], |
| "last": "Bj\u00f6rkelund", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonas", |
| "middle": [], |
| "last": "Kuhn", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "47--57", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/v1/P14-1005" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anders Bj\u00f6rkelund and Jonas Kuhn. 2014. Learning Structured Perceptrons for Coreference Resolution with Latent Antecedents and Non-local Features. In Proceedings of the 52nd Annual Meeting of the As- sociation for Computational Linguistics (Volume 1: Long Papers), pages 47-57, Baltimore, Maryland. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "German's next language model", |
| "authors": [ |
| { |
| "first": "Branden", |
| "middle": [], |
| "last": "Chan", |
| "suffix": "" |
| }, |
| { |
| "first": "Stefan", |
| "middle": [], |
| "last": "Schweter", |
| "suffix": "" |
| }, |
| { |
| "first": "Timo", |
| "middle": [], |
| "last": "M\u00f6ller", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 28th International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "6788--6796", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.coling-main.598" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Branden Chan, Stefan Schweter, and Timo M\u00f6ller. 2020. German's next language model. In Proceed- ings of the 28th International Conference on Com- putational Linguistics, pages 6788-6796, Barcelona, Spain (Online). International Committee on Compu- tational Linguistics.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "ELECTRA: Pretraining text encoders as discriminators rather than generators", |
| "authors": [ |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Minh-Thang", |
| "middle": [], |
| "last": "Luong", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [ |
| "V" |
| ], |
| "last": "Le", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "8th International Conference on Learning Representations", |
| "volume": "2020", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kevin Clark, Minh-Thang Luong, Quoc V. Le, and Christopher D. Manning. 2020. ELECTRA: Pre- training text encoders as discriminators rather than generators. In 8th International Conference on Learning Representations, ICLR 2020, Addis Ababa, Ethiopia. OpenReview.net.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Deep reinforcement learning for mention-ranking coreference models", |
| "authors": [ |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2256--2262", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D16-1245" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kevin Clark and Christopher D. Manning. 2016a. Deep reinforcement learning for mention-ranking coreference models. In Proceedings of the 2016 Conference on Empirical Methods in Natural Lan- guage Processing, pages 2256-2262, Austin, Texas. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Improving coreference resolution by learning entitylevel distributed representations", |
| "authors": [ |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "643--653", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P16-1061" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kevin Clark and Christopher D. Manning. 2016b. Im- proving coreference resolution by learning entity- level distributed representations. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 643-653, Berlin, Germany. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1423" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Long short-term memory", |
| "authors": [ |
| { |
| "first": "Sepp", |
| "middle": [], |
| "last": "Hochreiter", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00fcrgen", |
| "middle": [], |
| "last": "Schmidhuber", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Neural Computation", |
| "volume": "9", |
| "issue": "8", |
| "pages": "1735--1780", |
| "other_ids": { |
| "DOI": [ |
| "10.1162/neco.1997.9.8.1735" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural Computation, 9(8):1735-1780.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "SpanBERT: Improving pre-training by representing and predicting spans", |
| "authors": [ |
| { |
| "first": "Mandar", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Yinhan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [ |
| "S" |
| ], |
| "last": "Weld", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "8", |
| "issue": "", |
| "pages": "64--77", |
| "other_ids": { |
| "DOI": [ |
| "10.1162/tacl_a_00300" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mandar Joshi, Danqi Chen, Yinhan Liu, Daniel S. Weld, Luke Zettlemoyer, and Omer Levy. 2020. SpanBERT: Improving pre-training by representing and predicting spans. Transactions of the Associa- tion for Computational Linguistics, 8:64-77.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "BERT for coreference resolution: Baselines and analysis", |
| "authors": [ |
| { |
| "first": "Mandar", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Weld", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "5803--5808", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D19-1588" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mandar Joshi, Omer Levy, Luke Zettlemoyer, and Daniel Weld. 2019. BERT for coreference reso- lution: Baselines and analysis. In Proceedings of the 2019 Conference on Empirical Methods in Nat- ural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 5803-5808, Hong Kong, China. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Coreference resolution with entity equalization", |
| "authors": [ |
| { |
| "first": "Ben", |
| "middle": [], |
| "last": "Kantor", |
| "suffix": "" |
| }, |
| { |
| "first": "Amir", |
| "middle": [], |
| "last": "Globerson", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "673--677", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P19-1066" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ben Kantor and Amir Globerson. 2019. Coreference resolution with entity equalization. In Proceed- ings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 673-677, Flo- rence, Italy. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "An Incremental Entity-Mention Model for Coreference Resolution with Restrictive Antecedent Accessibility", |
| "authors": [ |
| { |
| "first": "Manfred", |
| "middle": [], |
| "last": "Klenner", |
| "suffix": "" |
| }, |
| { |
| "first": "Don", |
| "middle": [], |
| "last": "Tuggener", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the International Conference Recent Advances in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "178--185", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Manfred Klenner and Don Tuggener. 2011. An In- cremental Entity-Mention Model for Coreference Resolution with Restrictive Antecedent Accessibil- ity. In Proceedings of the International Conference Recent Advances in Natural Language Processing 2011, pages 178-185, Hissar, Bulgaria. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Techniques for the Automatic Extraction of Character Networks in German Historic Novels", |
| "authors": [ |
| { |
| "first": "Markus", |
| "middle": [], |
| "last": "Krug", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Markus Krug. 2020. Techniques for the Automatic Ex- traction of Character Networks in German Historic Novels. Ph.D. thesis, Universit\u00e4t W\u00fcrzburg.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Description of a corpus of character references in German novels-DROC", |
| "authors": [ |
| { |
| "first": "Markus", |
| "middle": [], |
| "last": "Krug", |
| "suffix": "" |
| }, |
| { |
| "first": "Lukas", |
| "middle": [], |
| "last": "Weimer", |
| "suffix": "" |
| }, |
| { |
| "first": "Isabella", |
| "middle": [], |
| "last": "Reger", |
| "suffix": "" |
| }, |
| { |
| "first": "Luisa", |
| "middle": [], |
| "last": "Macharowsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephan", |
| "middle": [], |
| "last": "Feldhaus", |
| "suffix": "" |
| }, |
| { |
| "first": "Frank", |
| "middle": [], |
| "last": "Puppe", |
| "suffix": "" |
| }, |
| { |
| "first": "Fotis", |
| "middle": [], |
| "last": "Jannidis", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "DARIAH-DE Working Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Markus Krug, Lukas Weimer, Isabella Reger, Luisa Macharowsky, Stephan Feldhaus, Frank Puppe, and Fotis Jannidis. 2018. Description of a corpus of character references in German novels-DROC [Deutsches ROman Corpus]. DARIAH-DE Working Papers, 27.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Stanford's multi-pass sieve coreference resolution system at the CoNLL-2011 shared task", |
| "authors": [ |
| { |
| "first": "Heeyoung", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Yves", |
| "middle": [], |
| "last": "Peirsman", |
| "suffix": "" |
| }, |
| { |
| "first": "Angel", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Nathanael", |
| "middle": [], |
| "last": "Chambers", |
| "suffix": "" |
| }, |
| { |
| "first": "Mihai", |
| "middle": [], |
| "last": "Surdeanu", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the Fifteenth Conference on Computational Natural Language Learning: Shared Task", |
| "volume": "", |
| "issue": "", |
| "pages": "28--34", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Heeyoung Lee, Yves Peirsman, Angel Chang, Nathanael Chambers, Mihai Surdeanu, and Dan Jurafsky. 2011. Stanford's multi-pass sieve corefer- ence resolution system at the CoNLL-2011 shared task. In Proceedings of the Fifteenth Conference on Computational Natural Language Learning: Shared Task, pages 28-34, Portland, Oregon. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "End-to-end neural coreference resolution", |
| "authors": [ |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Luheng", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "188--197", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D17-1018" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kenton Lee, Luheng He, Mike Lewis, and Luke Zettle- moyer. 2017. End-to-end neural coreference reso- lution. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 188-197, Copenhagen, Denmark. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Higher-order coreference resolution with coarse-tofine inference", |
| "authors": [ |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Luheng", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "2", |
| "issue": "", |
| "pages": "687--692", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N18-2108" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kenton Lee, Luheng He, and Luke Zettlemoyer. 2018. Higher-order coreference resolution with coarse-to- fine inference. In Proceedings of the 2018 Confer- ence of the North American Chapter of the Associ- ation for Computational Linguistics: Human Lan- guage Technologies, Volume 2 (Short Papers), pages 687-692, New Orleans, Louisiana. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Using coreference for question answering", |
| "authors": [ |
| { |
| "first": "Thomas", |
| "middle": [ |
| "S" |
| ], |
| "last": "Morton", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "Coreference and Its Applications", |
| "volume": "", |
| "issue": "", |
| "pages": "85--89", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas S. Morton. 1999. Using coreference for ques- tion answering. In Coreference and Its Applications, pages 85-89, College Park, Maryland. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Manual for the annotation of in-document referential relations", |
| "authors": [ |
| { |
| "first": "Karin", |
| "middle": [], |
| "last": "Naumann", |
| "suffix": "" |
| }, |
| { |
| "first": "Vera", |
| "middle": [], |
| "last": "M\u00f6ller", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Karin Naumann and Vera M\u00f6ller. 2006. Manual for the annotation of in-document referential relations. Technical report, Universit\u00e4t T\u00fcbingen.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "GerDraCor-coref: A coreference corpus for dramatic texts in German", |
| "authors": [ |
| { |
| "first": "Janis", |
| "middle": [], |
| "last": "Pagel", |
| "suffix": "" |
| }, |
| { |
| "first": "Nils", |
| "middle": [], |
| "last": "Reiter", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 12th Language Resources and Evaluation Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "55--64", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Janis Pagel and Nils Reiter. 2020. GerDraCor-coref: A coreference corpus for dramatic texts in German. In Proceedings of the 12th Language Resources and Evaluation Conference, pages 55-64, Marseille, France. European Language Resources Association.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Pytorch: An imperative style, high-performance deep learning library", |
| "authors": [ |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Paszke", |
| "suffix": "" |
| }, |
| { |
| "first": "Sam", |
| "middle": [], |
| "last": "Gross", |
| "suffix": "" |
| }, |
| { |
| "first": "Francisco", |
| "middle": [], |
| "last": "Massa", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Lerer", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Bradbury", |
| "suffix": "" |
| }, |
| { |
| "first": "Gregory", |
| "middle": [], |
| "last": "Chanan", |
| "suffix": "" |
| }, |
| { |
| "first": "Trevor", |
| "middle": [], |
| "last": "Killeen", |
| "suffix": "" |
| }, |
| { |
| "first": "Zeming", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Natalia", |
| "middle": [], |
| "last": "Gimelshein", |
| "suffix": "" |
| }, |
| { |
| "first": "Luca", |
| "middle": [], |
| "last": "Antiga", |
| "suffix": "" |
| }, |
| { |
| "first": "Alban", |
| "middle": [], |
| "last": "Desmaison", |
| "suffix": "" |
| }, |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Kopf", |
| "suffix": "" |
| }, |
| { |
| "first": "Edward", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zachary", |
| "middle": [], |
| "last": "Devito", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Raison", |
| "suffix": "" |
| }, |
| { |
| "first": "Alykhan", |
| "middle": [], |
| "last": "Tejani", |
| "suffix": "" |
| }, |
| { |
| "first": "Sasank", |
| "middle": [], |
| "last": "Chilamkurthy", |
| "suffix": "" |
| }, |
| { |
| "first": "Benoit", |
| "middle": [], |
| "last": "Steiner", |
| "suffix": "" |
| }, |
| { |
| "first": "Lu", |
| "middle": [], |
| "last": "Fang", |
| "suffix": "" |
| }, |
| { |
| "first": "Junjie", |
| "middle": [], |
| "last": "Bai", |
| "suffix": "" |
| }, |
| { |
| "first": "Soumith", |
| "middle": [], |
| "last": "Chintala", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "32", |
| "issue": "", |
| "pages": "8024--8035", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Te- jani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. 2019. Pytorch: An imperative style, high-performance deep learn- ing library. In Advances in Neural Information Pro- cessing Systems 32, pages 8024-8035. Curran Asso- ciates, Inc.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Deep contextualized word representations", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Peters", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Neumann", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Iyyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Gardner", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "2227--2237", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N18-1202" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word rep- resentations. In Proceedings of the 2018 Confer- ence of the North American Chapter of the Associ- ation for Computational Linguistics: Human Lan- guage Technologies, Volume 1 (Long Papers), pages 2227-2237, New Orleans, Louisiana. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "CoNLL-2012 shared task: Modeling multilingual unrestricted coreference in OntoNotes", |
| "authors": [ |
| { |
| "first": "Sameer", |
| "middle": [], |
| "last": "Pradhan", |
| "suffix": "" |
| }, |
| { |
| "first": "Alessandro", |
| "middle": [], |
| "last": "Moschitti", |
| "suffix": "" |
| }, |
| { |
| "first": "Nianwen", |
| "middle": [], |
| "last": "Xue", |
| "suffix": "" |
| }, |
| { |
| "first": "Olga", |
| "middle": [], |
| "last": "Uryupina", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuchen", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Joint Conference on EMNLP and CoNLL -Shared Task", |
| "volume": "", |
| "issue": "", |
| "pages": "1--40", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sameer Pradhan, Alessandro Moschitti, Nianwen Xue, Olga Uryupina, and Yuchen Zhang. 2012. CoNLL- 2012 shared task: Modeling multilingual unre- stricted coreference in OntoNotes. In Joint Confer- ence on EMNLP and CoNLL -Shared Task, pages 1-40, Jeju Island, Korea. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "SemEval-2010 task 1: Coreference resolution in multiple languages", |
| "authors": [ |
| { |
| "first": "Marta", |
| "middle": [], |
| "last": "Recasens", |
| "suffix": "" |
| }, |
| { |
| "first": "Llu\u00eds", |
| "middle": [], |
| "last": "M\u00e0rquez", |
| "suffix": "" |
| }, |
| { |
| "first": "Emili", |
| "middle": [], |
| "last": "Sapena", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "Ant\u00f2nia" |
| ], |
| "last": "Mart\u00ed", |
| "suffix": "" |
| }, |
| { |
| "first": "Mariona", |
| "middle": [], |
| "last": "Taul\u00e9", |
| "suffix": "" |
| }, |
| { |
| "first": "V\u00e9ronique", |
| "middle": [], |
| "last": "Hoste", |
| "suffix": "" |
| }, |
| { |
| "first": "Massimo", |
| "middle": [], |
| "last": "Poesio", |
| "suffix": "" |
| }, |
| { |
| "first": "Yannick", |
| "middle": [], |
| "last": "Versley", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 5th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "1--8", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marta Recasens, Llu\u00eds M\u00e0rquez, Emili Sapena, M. Ant\u00f2nia Mart\u00ed, Mariona Taul\u00e9, V\u00e9ronique Hoste, Massimo Poesio, and Yannick Versley. 2010. SemEval-2010 task 1: Coreference resolution in multiple languages. In Proceedings of the 5th Inter- national Workshop on Semantic Evaluation, pages 1-8, Uppsala, Sweden. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "IMS HotCoref DE: A Data-driven Co-reference Resolver for German", |
| "authors": [ |
| { |
| "first": "Ina", |
| "middle": [], |
| "last": "Roesiger", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonas", |
| "middle": [], |
| "last": "Kuhn", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16)", |
| "volume": "", |
| "issue": "", |
| "pages": "155--160", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ina Roesiger and Jonas Kuhn. 2016. IMS HotCoref DE: A Data-driven Co-reference Resolver for Ger- man. In Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16), pages 155-160, Portoro\u017e, Slovenia. Eu- ropean Language Resources Association (ELRA).", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Towards Coreference for Literary Text: Analyzing Domain-Specific Phenomena", |
| "authors": [ |
| { |
| "first": "Ina", |
| "middle": [], |
| "last": "Roesiger", |
| "suffix": "" |
| }, |
| { |
| "first": "Sarah", |
| "middle": [], |
| "last": "Schulz", |
| "suffix": "" |
| }, |
| { |
| "first": "Nils", |
| "middle": [], |
| "last": "Reiter", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Second Joint SIGHUM Workshop on Computational Linguistics for Cultural Heritage, Social Sciences, Humanities and Literature", |
| "volume": "", |
| "issue": "", |
| "pages": "129--138", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ina Roesiger, Sarah Schulz, and Nils Reiter. 2018. Towards Coreference for Literary Text: Analyzing Domain-Specific Phenomena. In Proceedings of the Second Joint SIGHUM Workshop on Computa- tional Linguistics for Cultural Heritage, Social Sci- ences, Humanities and Literature, pages 129-138, Santa Fe, New Mexico. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Two uses of anaphora resolution in summarization", |
| "authors": [ |
| { |
| "first": "Josef", |
| "middle": [], |
| "last": "Steinberger", |
| "suffix": "" |
| }, |
| { |
| "first": "Massimo", |
| "middle": [], |
| "last": "Poesio", |
| "suffix": "" |
| }, |
| { |
| "first": "Mijail", |
| "middle": [ |
| "A" |
| ], |
| "last": "Kabadjov", |
| "suffix": "" |
| }, |
| { |
| "first": "Karel", |
| "middle": [], |
| "last": "Je\u017eek", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Information Processing & Management", |
| "volume": "43", |
| "issue": "", |
| "pages": "1663--1680", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Josef Steinberger, Massimo Poesio, Mijail A. Kabad- jov, and Karel Je\u017eek. 2007. Two uses of anaphora resolution in summarization. Information Process- ing & Management, 43(6):1663-1680.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Stylebook for the t\u00fcbingen treebank of written german (T\u00fcBa-D/Z)", |
| "authors": [ |
| { |
| "first": "Heike", |
| "middle": [], |
| "last": "Telljohann", |
| "suffix": "" |
| }, |
| { |
| "first": "Erhard", |
| "middle": [ |
| "W" |
| ], |
| "last": "Hinrichs", |
| "suffix": "" |
| }, |
| { |
| "first": "Sandra", |
| "middle": [], |
| "last": "K\u00fcbler", |
| "suffix": "" |
| }, |
| { |
| "first": "Heike", |
| "middle": [], |
| "last": "Zinsmeister", |
| "suffix": "" |
| }, |
| { |
| "first": "Kathrin", |
| "middle": [], |
| "last": "Beck", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Seminar f\u00fcr Sprachwissenschaft", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Heike Telljohann, Erhard W. Hinrichs, Sandra K\u00fcbler, Heike Zinsmeister, and Kathrin Beck. 2017. Style- book for the t\u00fcbingen treebank of written german (T\u00fcBa-D/Z). In Seminar f\u00fcr Sprachwissenschaft, Universit\u00e4t T\u00fcbingen, Germany.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Learning to Ignore: Long Document Coreference with Bounded Memory Neural Networks", |
| "authors": [ |
| { |
| "first": "Shubham", |
| "middle": [], |
| "last": "Toshniwal", |
| "suffix": "" |
| }, |
| { |
| "first": "Sam", |
| "middle": [], |
| "last": "Wiseman", |
| "suffix": "" |
| }, |
| { |
| "first": "Allyson", |
| "middle": [], |
| "last": "Ettinger", |
| "suffix": "" |
| }, |
| { |
| "first": "Karen", |
| "middle": [], |
| "last": "Livescu", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Gimpel", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "8519--8526", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.emnlp-main.685" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shubham Toshniwal, Sam Wiseman, Allyson Ettinger, Karen Livescu, and Kevin Gimpel. 2020. Learn- ing to Ignore: Long Document Coreference with Bounded Memory Neural Networks. In Proceed- ings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 8519-8526, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "A Hybrid Entity-Mention Pronoun Resolution Model for German Using Markov Logic Networks", |
| "authors": [ |
| { |
| "first": "Don", |
| "middle": [], |
| "last": "Tuggener", |
| "suffix": "" |
| }, |
| { |
| "first": "Manfred", |
| "middle": [], |
| "last": "Klenner", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 12th Edition of the Konvens Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "21--29", |
| "other_ids": { |
| "DOI": [ |
| "10.5167/uzh-99594" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Don Tuggener and Manfred Klenner. 2014. A Hy- brid Entity-Mention Pronoun Resolution Model for German Using Markov Logic Networks. In Pro- ceedings of the 12th Edition of the Konvens Confer- ence, pages 21-29, Hildesheim, Germany. Univer- sit\u00e4tsbibliothek Hildesheim.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "OntoNotes release 5.0 ldc2013t19. Linguistic Data Consortium, Philadelphia", |
| "authors": [ |
| { |
| "first": "Ralph", |
| "middle": [], |
| "last": "Weischedel", |
| "suffix": "" |
| }, |
| { |
| "first": "Martha", |
| "middle": [], |
| "last": "Palmer", |
| "suffix": "" |
| }, |
| { |
| "first": "Mitchell", |
| "middle": [], |
| "last": "Marcus", |
| "suffix": "" |
| }, |
| { |
| "first": "Eduard", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| }, |
| { |
| "first": "Sameer", |
| "middle": [], |
| "last": "Pradhan", |
| "suffix": "" |
| }, |
| { |
| "first": "Lance", |
| "middle": [], |
| "last": "Ramshaw", |
| "suffix": "" |
| }, |
| { |
| "first": "Nianwen", |
| "middle": [], |
| "last": "Xue", |
| "suffix": "" |
| }, |
| { |
| "first": "Ann", |
| "middle": [], |
| "last": "Taylor", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeff", |
| "middle": [], |
| "last": "Kaufman", |
| "suffix": "" |
| }, |
| { |
| "first": "Michelle", |
| "middle": [], |
| "last": "Franchini", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ralph Weischedel, Martha Palmer, Mitchell Marcus, Eduard Hovy, Sameer Pradhan, Lance Ramshaw, Ni- anwen Xue, Ann Taylor, Jeff Kaufman, Michelle Franchini, et al. 2013. OntoNotes release 5.0 ldc2013t19. Linguistic Data Consortium, Philadel- phia, Pennsylvania, 23.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Learning anaphoricity and antecedent ranking features for coreference resolution", |
| "authors": [ |
| { |
| "first": "Sam", |
| "middle": [], |
| "last": "Wiseman", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [ |
| "M" |
| ], |
| "last": "Rush", |
| "suffix": "" |
| }, |
| { |
| "first": "Stuart", |
| "middle": [], |
| "last": "Shieber", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
| "volume": "1", |
| "issue": "", |
| "pages": "1416--1426", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/v1/P15-1137" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sam Wiseman, Alexander M. Rush, Stuart Shieber, and Jason Weston. 2015. Learning anaphoricity and an- tecedent ranking features for coreference resolution. In Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Lan- guage Processing (Volume 1: Long Papers), pages 1416-1426, Beijing, China. Association for Compu- tational Linguistics.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Learning global features for coreference resolution", |
| "authors": [ |
| { |
| "first": "Sam", |
| "middle": [], |
| "last": "Wiseman", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [ |
| "M" |
| ], |
| "last": "Rush", |
| "suffix": "" |
| }, |
| { |
| "first": "Stuart", |
| "middle": [ |
| "M" |
| ], |
| "last": "Shieber", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "994--1004", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N16-1114" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sam Wiseman, Alexander M. Rush, and Stuart M. Shieber. 2016. Learning global features for coref- erence resolution. In Proceedings of the 2016 Con- ference of the North American Chapter of the Asso- ciation for Computational Linguistics: Human Lan- guage Technologies, pages 994-1004, San Diego, California. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "CorefQA: Coreference resolution as query-based span prediction", |
| "authors": [ |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Fei", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Arianna", |
| "middle": [], |
| "last": "Yuan", |
| "suffix": "" |
| }, |
| { |
| "first": "Fei", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiwei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "6953--6963", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.622" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wei Wu, Fei Wang, Arianna Yuan, Fei Wu, and Ji- wei Li. 2020. CorefQA: Coreference resolution as query-based span prediction. In Proceedings of the 58th Annual Meeting of the Association for Compu- tational Linguistics, pages 6953-6963, Online. As- sociation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Moving on from OntoNotes: Coreference resolution model transfer", |
| "authors": [ |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Xia", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Van Durme", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Patrick Xia and Benjamin Van Durme. 2021. Moving on from OntoNotes: Coreference resolution model transfer. CoRR, abs/2104.08457.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Incremental neural coreference resolution in constant memory", |
| "authors": [ |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Xia", |
| "suffix": "" |
| }, |
| { |
| "first": "Jo\u00e3o", |
| "middle": [], |
| "last": "Sedoc", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Van Durme", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "8617--8624", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.emnlp-main.695" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Patrick Xia, Jo\u00e3o Sedoc, and Benjamin Van Durme. 2020. Incremental neural coreference resolution in constant memory. In Proceedings of the 2020 Con- ference on Empirical Methods in Natural Language Processing (EMNLP), pages 8617-8624, Online. As- sociation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Revealing the myth of higher-order inference in coreference resolution", |
| "authors": [ |
| { |
| "first": "Liyan", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jinho", |
| "middle": [ |
| "D" |
| ], |
| "last": "Choi", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "8527--8533", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.emnlp-main.686" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Liyan Xu and Jinho D. Choi. 2020. Revealing the myth of higher-order inference in coreference resolution. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 8527-8533, Online. Association for Computa- tional Linguistics.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "num": null, |
| "text": "The performance of incremental systems compared to the coarse-to-fine model as document length increases.", |
| "type_str": "figure" |
| }, |
| "TABREF2": { |
| "content": "<table><tr><td/><td>Input Text</td><td/><td colspan=\"4\">Coarse Filter</td><td/><td/><td/><td colspan=\"4\">Fine Ranking</td><td/><td/><td>Output Entities</td><td/></tr><tr><td colspan=\"2\">Mention Generation I am called Bob Mention Ranking & Filtering</td><td colspan=\"2\">called I I am</td><td>I</td><td>I am</td><td>called</td><td>Bob</td><td/><td colspan=\"2\">called I I am</td><td>I</td><td>I am</td><td>called</td><td>Bob</td><td/><td>{Bob, I} {called}</td><td>Coarse-to-fine</td></tr><tr><td>\u2022 I</td><td>1. Bob</td><td/><td>Bob</td><td/><td/><td/><td/><td/><td colspan=\"2\">Bob</td><td/><td/><td/><td/><td/><td/><td/></tr><tr><td>\u2022 I am</td><td>2. I</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td>\u2022 am</td><td>3. called</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td>\u2022 am called</td><td>4. I am</td><td/><td/><td/><td/><td/><td colspan=\"5\">Entity Assignment</td><td/><td/><td/><td/><td>Output Entities</td><td/></tr><tr><td>\u2022 called \u2022 called Bob \u2022 Bob</td><td>5. called Bob 6. am 7. am called Mention Proposal</td><td>I</td><td>\u2205</td><td>I am</td><td>\u2205</td><td>{I}</td><td>called</td><td>\u2205</td><td>{I}</td><td>{I am}</td><td>Bob</td><td>\u2205</td><td>{I}</td><td>{I am}</td><td>{called}</td><td>{I} {I am} {called} {Bob, I}</td><td>Incremental</td></tr></table>", |
| "type_str": "table", |
| "num": null, |
| "html": null, |
| "text": "embeddings are obtained for each token and all possible" |
| }, |
| "TABREF4": { |
| "content": "<table><tr><td>: T\u00fcBa-D/Z 10 development score of coarse-to-</td></tr><tr><td>fine models with different language models (using 512</td></tr><tr><td>as segment size)</td></tr></table>", |
| "type_str": "table", |
| "num": null, |
| "html": null, |
| "text": "" |
| }, |
| "TABREF5": { |
| "content": "<table><tr><td colspan=\"3\">Segment Length F1 (base) F1 (large)</td></tr><tr><td>128</td><td>75.69</td><td>76.28</td></tr><tr><td>256</td><td>76.56</td><td>77.29</td></tr><tr><td>384</td><td>77.01</td><td>78.51</td></tr><tr><td>512</td><td>77.50</td><td>79.27</td></tr></table>", |
| "type_str": "table", |
| "num": null, |
| "html": null, |
| "text": ", we selected GNG ELECTRA as the base and GELECTRA as the large model for our remaining experiments." |
| }, |
| "TABREF7": { |
| "content": "<table><tr><td colspan=\"3\">: Performance of the coarse-to-fine and incre-</td></tr><tr><td colspan=\"3\">mental models with respect to two configuration param-</td></tr><tr><td colspan=\"2\">eters relevant to recency bias.</td><td/></tr><tr><td>CoNLL-F1</td><td colspan=\"2\">Teacher Forcing</td></tr><tr><td>Discard</td><td>63.92 58.52</td><td>65.42 57.27</td></tr></table>", |
| "type_str": "table", |
| "num": null, |
| "html": null, |
| "text": "" |
| }, |
| "TABREF8": { |
| "content": "<table/>", |
| "type_str": "table", |
| "num": null, |
| "html": null, |
| "text": "" |
| }, |
| "TABREF11": { |
| "content": "<table/>", |
| "type_str": "table", |
| "num": null, |
| "html": null, |
| "text": "Final results for the DROC dataset on the test set, with and without singleton mentions included." |
| }, |
| "TABREF14": { |
| "content": "<table><tr><td/><td/><td>MUC</td><td/><td/><td>B 3</td><td/><td/><td>CEAF \u03c64</td><td/><td>CoNLL</td><td/><td>LEA</td></tr><tr><td/><td>R</td><td>P</td><td>F1</td><td>R</td><td>P</td><td>F1</td><td>R</td><td>P</td><td>F1</td><td>F1</td><td>R</td><td>P</td><td>F1</td></tr><tr><td/><td/><td/><td/><td/><td colspan=\"2\">T\u00fcBa-D/Z 10.0</td><td/><td/><td/><td/><td/><td/></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td>78.79</td><td colspan=\"3\">73.25 73.25 74.67</td></tr><tr><td>IMS HotCoref</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td>48.54</td><td/><td/></tr><tr><td>+ gold mentions</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td>65.76</td><td/><td/></tr><tr><td/><td/><td/><td/><td/><td colspan=\"2\">SemEval-2010</td><td/><td/><td/><td/><td/><td/></tr><tr><td>German c2f base</td><td colspan=\"10\">76.64 74.46</td><td colspan=\"3\">70.69 67.18 68.89</td></tr><tr><td>IMS HotCoref</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td>48.61</td><td/><td/></tr><tr><td>+ gold mentions</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td>63.61</td><td/><td/></tr><tr><td>CorZu</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td>45.82</td><td/><td/></tr><tr><td>+ gold mentions</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td>58.11</td><td/><td/></tr></table>", |
| "type_str": "table", |
| "num": null, |
| "html": null, |
| "text": "Overview of the dataset releases referred to in this work. German c2f base 81.92 79.90 80.90 77.41 73.52 75.41 75.16 75.50 75.33 77.21 74.98 70.82 72.84 German c2f large 82.85 81.61 82.23 78.41 75.73 77.05 76.75 77.44 77.09 76.08 76.36 71.18 69.12 70.14 71.83 70.45 71.13 72.54 67.88 65.7 66.77 German c2f large 79.07 76.51 77.77 73.88 70.48 72.14 74.79 72.21 73.47" |
| } |
| } |
| } |
| } |