| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T13:21:35.167173Z" |
| }, |
| "title": "Improving Span Representation for Domain-adapted Coreference Resolution", |
| "authors": [ |
| { |
| "first": "Nupoor", |
| "middle": [], |
| "last": "Gandhi", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Washington", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Anjalie", |
| "middle": [], |
| "last": "Field", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Washington", |
| "location": {} |
| }, |
| "email": "anjalief@cs.cmu.edu" |
| }, |
| { |
| "first": "Yulia", |
| "middle": [], |
| "last": "Tsvetkov", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Washington", |
| "location": {} |
| }, |
| "email": "yuliats@cs.washington.edu" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Recent work has shown fine-tuning neural coreference models can produce strong performance when adapting to different domains. However, at the same time, this can require a large amount of annotated target examples. In this work, we focus on supervised domain adaptation for clinical notes, proposing the use of concept knowledge to more efficiently adapt coreference models to a new domain. We develop methods to improve the span representations via (1) a retrofitting loss to incentivize span representations to satisfy a knowledgebased distance function and (2) a scaffolding loss to guide the recovery of knowledge from the span representation. By integrating these losses, our model is able to improve our baseline precision and F-1 score. In particular, we show that incorporating knowledge with endto-end coreference models results in better performance on the most challenging, domainspecific spans 1 .", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Recent work has shown fine-tuning neural coreference models can produce strong performance when adapting to different domains. However, at the same time, this can require a large amount of annotated target examples. In this work, we focus on supervised domain adaptation for clinical notes, proposing the use of concept knowledge to more efficiently adapt coreference models to a new domain. We develop methods to improve the span representations via (1) a retrofitting loss to incentivize span representations to satisfy a knowledgebased distance function and (2) a scaffolding loss to guide the recovery of knowledge from the span representation. By integrating these losses, our model is able to improve our baseline precision and F-1 score. In particular, we show that incorporating knowledge with endto-end coreference models results in better performance on the most challenging, domainspecific spans 1 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Recent work has achieved high performance on coreference resolution in standard benchmark datasets like OntoNotes (Kirstain et al., 2021; Joshi et al., 2020; Weischedel et al., 2013) . However, in many real world settings where coreference resolution would be valuable, text differs greatly from these standard datasets. For example, coreference resolution over clinical notes can enable tracking a patient's progress and treatment history. However, clinical notes contain acronyms and medical terminology. Annotating new training data for every domain of interest is expensive and time-consuming, and coreference models trained on existing benchmark datasets perform worse on other domains (Srivastava et al., 2020; Xu and Choi, 2020; Joshi et al., 2020) . In this work, we develop a domainadaptation model for coreference resolution that requires only a small number of target training examples and target domain knowledge.", |
| "cite_spans": [ |
| { |
| "start": 114, |
| "end": 137, |
| "text": "(Kirstain et al., 2021;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 138, |
| "end": 157, |
| "text": "Joshi et al., 2020;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 158, |
| "end": 182, |
| "text": "Weischedel et al., 2013)", |
| "ref_id": null |
| }, |
| { |
| "start": 691, |
| "end": 716, |
| "text": "(Srivastava et al., 2020;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 717, |
| "end": 735, |
| "text": "Xu and Choi, 2020;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 736, |
| "end": 755, |
| "text": "Joshi et al., 2020)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Our primary approach involves incorporating domain-knowledge into the span representations learned by an end-to-end neural system (Lee et al., 2017) . A span representation is a vector representation of a contiguous set of tokens. When determining if a given mention refers to an antecedent, span representations are used by the model to (1.) select a set of candidate mentions and (2.) select an antecedent from the candidates for the given mention. Thus, a high-quality span representation encodes the semantic meaning of the span tokens and their local context. Joshi et al. (2020) introduced Span-BERT, a pre-training method extending BERT, designed to improve performance on span-selection tasks that involves masking contiguous spans rather than tokens. Span representations are derived by concatenating the pre-trained transformer outputs at the boundary tokens with an attention-weighted vector over the span tokens. These representations are fed into a coreference resolution model, thus integrating SpanBERT into an end-to-end coreference resolution system.", |
| "cite_spans": [ |
| { |
| "start": 130, |
| "end": 148, |
| "text": "(Lee et al., 2017)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 565, |
| "end": 584, |
| "text": "Joshi et al. (2020)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "SpanBERT is able to capture coreference structure implicitly in rich span representations. The expressiveness of the SpanBERT representation is apparent from extrinsic coreference performance, but also through probing tasks that have shown that span representations can capture headedness, coreference arcs, and other linguistic features of coreference (Kahardipraja et al., 2020) . The best coreference performance and span representations are obtained by training the end-to-end model with SpanBERT using labeled coreference data.", |
| "cite_spans": [ |
| { |
| "start": 353, |
| "end": 380, |
| "text": "(Kahardipraja et al., 2020)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "When adapting a coreference model to a new domain, fine-tuning or continued training can greatly improve performance, but this approach can be computationally expensive and requires a large amount of labelled documents from the target domain (Gururangan et al., 2020; Xia and Van Durme, 2021) . Neural models have also been criticized for largely relying on shallow heuristics in the text, suggesting this data-driven learning method requires many target examples to learn a new target distribution (Lu and Ng, 2020; Rosenman et al., 2020) .", |
| "cite_spans": [ |
| { |
| "start": 242, |
| "end": 267, |
| "text": "(Gururangan et al., 2020;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 268, |
| "end": 292, |
| "text": "Xia and Van Durme, 2021)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 499, |
| "end": 516, |
| "text": "(Lu and Ng, 2020;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 517, |
| "end": 539, |
| "text": "Rosenman et al., 2020)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The presence of out-of-vocabulary words in a new domain can create additional challenges. Span-BERT uses wordpiece tokenization, which can lead to misleading meaning representation for spans when a single wordpiece belongs to spans with different meanings (Joshi et al., 2020; Poerner et al., 2020b) . Consider for example, the spans euthmyia and dementia, both of which are common medical terminology but out-of-vocabulary words for Span-BERT, which tokenizes them to contain a common wordpiece: \"##ia\". As described by Poerner et al. (2020b) , this can lead to a coreference model incorrectly predicting the spans coreferent, since the suffix \"##ia\" is commonly associated with diseases. A coreference model could correct this by learning a more meaningful representation for the prefix tokens and downweighting the suffix \"##ia\" token, but this would take many target domain training examples.", |
| "cite_spans": [ |
| { |
| "start": 256, |
| "end": 276, |
| "text": "(Joshi et al., 2020;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 277, |
| "end": 299, |
| "text": "Poerner et al., 2020b)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 521, |
| "end": 543, |
| "text": "Poerner et al. (2020b)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Instead, we propose a more efficient method for integrating domain-specific knowledge into SpanBERT-based span representations, which requires only a small number of target training samples and leverages domain-specific concept knowledge.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We take a set of spans with some similarity in meaning to be a concept, and we use concepts of varying granularity (e.g., problem, headache).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "First, we introduce a retrofitting loss ( \u00a72.4) which guides the representation learning of span pairs to satisfy a knowledge-based distance function. This distance function reflects pairwise span relationships from our concept knowledge. As a result, we are able to align the target domain coreference structure encoded in the span representation with the global meanings of the spans. This allows the end-to-end model to more efficiently build more meaningful span representations for the target domain.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We also introduce an auxiliary scaffolding loss ( \u00a72.5) for a concept prediction task in order to ensure that knowledge relevant to the coreference task can be recovered from the span representation. This usage of an auxiliary task to produce a useful inductive bias was introduced in Swayamdipta et al. (2018) to add a syntactic labeling loss for coref-erence resolution since syntactic constituents are often coreferent. Spans belonging to the same concept within our concept knowledge usually corefer, so we generalize this technique to a broad, knowledge-based lexicon in our domain adaptation setting. While our retrofitting loss integrates relative knowledge into the span representation, we are able to supplement the span representation with global meaning using the scaffolding loss.", |
| "cite_spans": [ |
| { |
| "start": 285, |
| "end": 310, |
| "text": "Swayamdipta et al. (2018)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "To evaluate our models, we take OntoNotes as our source domain and the i2b2/VA corpus of clinical notes as our target domain. We train our model on the source domain and 200 examples from the target domain, we evaluate model performance on the target domain. Our knowledge concepts are from the i2b2/VA dataset and the UMLS Metathesaurus.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "First, we describe our methodology and introduce our retrofitting and scaffolding loss functions in \u00a72. Then, we describe our experiments to quantify model performance on our target domain in \u00a73, and finally we demonstrate the performance improvement over our baseline and on rare/OOV spans \u00a74. The main contribution of this work is in developing methods to integrate concept knowledge into coreference resolution systems to improve domain adaptation. We outperform our baseline primarily by improving precision, and in doing so, we demonstrate the utility of a set of knowledge concepts for adapting span representations to a new domain. We show that our model's performance does not deteriorate on highly domain-specific spans containing OOD frequently subdivided vocabulary.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The objective of coreference resolution is to identify a set of coreference clusters from a document containing entities, where each coreference cluster contains mentions referencing a single entity. The set of mention candidates are referred to as spans, where a span is any contiguous set of tokens in the document. We can use the coreference clusters {C 1 , C 2 , . . . , C m } to define an unconnected graph where the set of spans are the vertices V = {s 1 , s 2 , . . . , s n }, and there are edges only between spans that belong to the same cluster. First, we describe the basic model setup for a neural coreference resolution system, and we then describe our proposed approach, that involves two new losses. produced by SpanBERT for mentions \"euthymia\" and \"dementia\" are fed into a co-reference resolution model. The baseline (Joshi et al., 2020) , which uses a single coreference loss, CL, produces similar span representations for \"euthymia\" and \"dementia\" (top).", |
| "cite_spans": [ |
| { |
| "start": 834, |
| "end": 854, |
| "text": "(Joshi et al., 2020)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": "2" |
| }, |
| { |
| "text": "When we incorporate knowledge concepts, C k 1 = {euthymia, pyrexia, . . .}, C k 2 = {dementia, polymyalgia}, . . ., into the span representation using our proposed losses SL and RL, the span representations for \"euthmyia\" and \"dementia\" are further apart (bottom), accurately reflecting that \"euthmyia\" is a symptom, while \"dementia\"is a disease, even though they share a wordpiece \"ia\".", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": "2" |
| }, |
| { |
| "text": "For the set of possible spans, we first produce span representations. The span representation is learned as a part of the neural end-to-end framework introduced in Lee et al. (2017) . Given span representations, each span representation h i is assigned a unary mention score. The mention score reflects the likelihood that a given span is in fact a mention. This score is used to obtain a set of candidate mentions. Each span pair h i , h j is assigned a pairwise antecedent score reflecting the likelihood that h i is the antecedent of h j . For an arbitrary span pair, the overall score is composed of the antecedent score and the mention scores for each span. The scoring functions are learned using standard feed-forward neural networks, allowing us to derive a distribution over all possible antecedents for a given span x.", |
| "cite_spans": [ |
| { |
| "start": 164, |
| "end": 181, |
| "text": "Lee et al. (2017)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "End-to-end Coreference Model", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "P(y) = e s(x,y) y \u2208Y e s(x,y )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "End-to-end Coreference Model", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "where s is the scoring function as defined in Joshi et al. (2020) . We maximize the likelihood of the correct antecedents from the set of gold mentions, giving us a coreference loss (CL):", |
| "cite_spans": [ |
| { |
| "start": 46, |
| "end": 65, |
| "text": "Joshi et al. (2020)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "End-to-end Coreference Model", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "CL = log N i=1 \u0177\u2208Y(i)\u2229GOLD(i) P(\u0177)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "End-to-end Coreference Model", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "GOLD(i) denotes the set of spans in the gold cluster containing span i. Our baseline span representation is produced using SpanBERT with the single objective of minimizing the coreference loss (CL).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "End-to-end Coreference Model", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "SpanBERT is a pre-training method extending BERT that masks contiguous spans and also trains the span boundary representations to predict the masked span. The span representation h i is the concatenation of the two SpanBERT transformer states of the span endpoints (first and last word pieces) x START(i) , x END(i) and an attention vector x i computed over all the word pieces in the span (Joshi et al., 2019 (Joshi et al., , 2020 .", |
| "cite_spans": [ |
| { |
| "start": 390, |
| "end": 409, |
| "text": "(Joshi et al., 2019", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 410, |
| "end": 431, |
| "text": "(Joshi et al., , 2020", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SpanBERT representation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "h i = x START(i) , x END(i) ,x i , \u03c6(i)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SpanBERT representation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "The attention vectorx i is intended to best represent the internal span itself (e.g. head word), whereas the endpoints better represent the context (Lee et al., 2017) . This suggests that thex i component of the overall span representation is the most natural part of the span to align with global, non-contextual knowledge.", |
| "cite_spans": [ |
| { |
| "start": 148, |
| "end": 166, |
| "text": "(Lee et al., 2017)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SpanBERT representation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "We aim to create a span representation such that knowledge can be easily aligned with the coreference structure. Then, we can learn a span representation geometry such that connected spans are close and disconnected spans are far. In constructing such a vector space, we gain some flexibility to integrate any type of knowledge that shares the same structure as the coreference clusters. We can represent knowledge sources with concept clusters", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Integrating Knowledge into Span Representation", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "{C k 1 , C k 2 , .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Integrating Knowledge into Span Representation", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": ". . C k m } to resemble coreference clusters, so that we can impose that the coreference cluster graph is consistent with the concept cluster graph via two additional losses.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Integrating Knowledge into Span Representation", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "We propose two complementary approaches to integrating knowledge. A pairwise retrofitting loss is intended to encode relative knowledge and a unary \"scaffold\" loss is intended to encode global knowledge into the span representation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Integrating Knowledge into Span Representation", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "We want the span representationx i to be close to coreferring spansx j . Similarly span representations belonging to the same cluster should be close. Similar to Faruqui et al. 2015, we enforce this objective by defining a custom distance function. But unlike Faruqui et al. (2015) , our custom distance function is task specific and instead of using a lexicon, we are using broad concepts.", |
| "cite_spans": [ |
| { |
| "start": 260, |
| "end": 281, |
| "text": "Faruqui et al. (2015)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Retrofitting with Concept Knowledge", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "Concept Knowledge Distance metric We define our distance function to be composed of two elements: coreference information and concept knowledge.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Retrofitting with Concept Knowledge", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "d T (s i , s j ) = \u03b1 c d c (s i , s j ) + \u03b1 k d k (s i , s j )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Retrofitting with Concept Knowledge", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "Variables \u03b1 c , \u03b1 k each denote weights that we tune, and T references the document from which valid span pairs are passed into the function.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Retrofitting with Concept Knowledge", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "Coreference information To capture the coreference graph, we recreate the distance between span pairs that do not corefer (d c ). Note that this distance does not discriminate between span pairs that belong to separate coreference clusters and span pairs where one span does not belong to any coreference cluster at all. We define d c (s i , s j ) = 1 if the spans do not corefer, otherwise 0.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Retrofitting with Concept Knowledge", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "Knowledge From our knowledge, we can obtain concepts, or sets of spans with some level of similarity of non-contextual meaning:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Retrofitting with Concept Knowledge", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "{C k 1 , C k 2 , . . . , C k m }.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Retrofitting with Concept Knowledge", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "Coreferent spans refer to the same entity, and as a result, the concept type (e.g. person) must be consistent for any pair of coreferent spans. Thus, given a set of concepts, we want spans belonging to the same concept to have similar representations. Accordingly, we define d k (s i , s j ) = 0 if both spans belong to the same concept type, otherwise 1.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Retrofitting with Concept Knowledge", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "Retrofitting Loss (RL) We want to create a span representation with a geometry defined by our custom distance function. We can optimize the end-to-end model to satisfy a loss applied to thex i component of the span representation:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Retrofitting with Concept Knowledge", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "RL = 1 |r | i,j |d T (s i , s j ) \u2212 d(x i ,x j )|", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Retrofitting with Concept Knowledge", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "Here |r | denotes the number of span pairs internal to one document, which we use to normalize, identifies the document that span s i , s j belongs to, and the function d is cosine distance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Retrofitting with Concept Knowledge", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "We introduce a concept identification auxiliary task to guide the model to construct a span representation from which the concept can be recovered. Swayamdipta et al. (2018) introduces the notion of a \"scaffold\" or auxiliary supervised loss function that is related to the primary task. Since coreferring spans nearly always belong to the same concept in our concept knowledge, concepts are a good choice for a scaffold. By sharing SpanBERT parameters optimizing for the scaffold loss and the overall coreference loss, we are able to encode the concept type in the span representation. Auxiliary Scaffolding Loss (SL) Following from Swayamdipta et al. 2018, we assign a distribution over the set of concepts", |
| "cite_spans": [ |
| { |
| "start": 148, |
| "end": 173, |
| "text": "Swayamdipta et al. (2018)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Concept Identification as an Auxiliary Task", |
| "sec_num": "2.5" |
| }, |
| { |
| "text": "p(s i \u2208 C |x i ) = softmax c w c \u2022x i", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Concept Identification as an Auxiliary Task", |
| "sec_num": "2.5" |
| }, |
| { |
| "text": "where w c is a parameter vector associated with a concept C. This gives us the loss:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Concept Identification as an Auxiliary Task", |
| "sec_num": "2.5" |
| }, |
| { |
| "text": "SL = 1 |r | i log p c k i |x i", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Concept Identification as an Auxiliary Task", |
| "sec_num": "2.5" |
| }, |
| { |
| "text": "where c k i is the concept associated with span s i . Finally, we optimize a summation of these losses, weighting each loss with a hyperparameter that we tune.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Concept Identification as an Auxiliary Task", |
| "sec_num": "2.5" |
| }, |
| { |
| "text": "L = \u03b2 1 CL + \u03b2 2 RL + \u03b2 3 SL 3 Experiments 3.1 Datasets", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Concept Identification as an Auxiliary Task", |
| "sec_num": "2.5" |
| }, |
| { |
| "text": "Our target corpus is a medical notes dataset, released as a part of the i2b2/VA Shared-Task and Workshop in 2011 (Uzuner et al., 2011) . The dataset contains 251 train documents, 51 of which we have randomly selected for development and 173 test documents. Our dev set is used to select some model parameters (e.g., loss function weights", |
| "cite_spans": [ |
| { |
| "start": 113, |
| "end": 134, |
| "text": "(Uzuner et al., 2011)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Concept Identification as an Auxiliary Task", |
| "sec_num": "2.5" |
| }, |
| { |
| "text": "\u03b2 1 , \u03b2 2 , \u03b2 3 ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Concept Identification as an Auxiliary Task", |
| "sec_num": "2.5" |
| }, |
| { |
| "text": "The average length of these documents is 962.62 tokens with average coreference chain containing 4.48 spans. For our source domain corpus, we use OntoNotes, with documents on average half as long as the clinical notes and similar average chain length 4.21.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Concept Identification as an Auxiliary Task", |
| "sec_num": "2.5" |
| }, |
| { |
| "text": "3.1.1 Knowledge Lexicons i2b2 Concepts (i2b2): In addition to coreference chains, the i2b2/VA dataset includes broad concept labels for spans. We focus on four concepts: person (e.g. the patient, Dr. X), treatment (e.g. abdominal hysterectomy, the procedure), problem (e.g. coronary artery disease, slurred speech), test (e.g. MRI, echocardiogram). The i2b2 dataset annotates coreference chains s.t. corefering spans must belong to the same concept. In Table 1 , we report how these i2b2 concepts are distributed among the coreference chains. reider, 2004) . Each UMLS concept links synonymous spans, so the UMLS concepts are much more fine-grained than those defined in the i2b2 dataset. For example, a CUI for headache would map to {headaches, cranial pain, head pain cephalgia}. We used string match to assign a UMLS concept to spans in the training set. This resulted in the identification of 3,500 unique CUI's for the spans. We also experimented with using a partial string match to assign UMLS concepts to spans which we refer to as \"UMLS overlap\".", |
| "cite_spans": [ |
| { |
| "start": 543, |
| "end": 556, |
| "text": "reider, 2004)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 453, |
| "end": 460, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Concept Identification as an Auxiliary Task", |
| "sec_num": "2.5" |
| }, |
| { |
| "text": "For our supervised domain adaptation approach, we use a familiar approach of training a model on a source domain and tuning this model on a target domain. We take the current state of the art end-toend coreference model from Joshi et al. (2020) for our baseline. First, we train this SpanBERT-based end-to-end model on OntoNotes using the hyperparameters from Joshi et al. (2020) . Then, we continue training this model using the target i2b2 training dataset. Continued training has been shown to be effective for coreference resolution in out-of-domain settings (Xia and Van Durme, 2021) .", |
| "cite_spans": [ |
| { |
| "start": 360, |
| "end": 379, |
| "text": "Joshi et al. (2020)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 563, |
| "end": 588, |
| "text": "(Xia and Van Durme, 2021)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baseline", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "In order to improve the SpanBERT-based span representation, we introduce the i2b2 and UMLS concept knowledge in two ways: we retrofit the span representation to the concept knowledge ( \u00a72.4) and introduce an auxiliary task of concept identification as a scaffold ( \u00a72.5).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Setup", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "In our implementation, we add the corresponding retrofitting loss (RL) and scaffolding loss (SL) from these two objectives to the coreference loss (CL) to produce an overall loss which we optimize for. Aside from the difference in the loss function, the training process for our model resembles that of our baseline. We train our model first with the source domain OntoNotes and then continue training on the i2b2 dataset.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Setup", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "For the retrofitting loss, we experiment with the knowledge lexicons i2b2 and UMLS individually and together. Recall that the knowledge lexicon distance metric relies on two main components: coreference clusters C 1 , C 2 , . . . and knowledge clusters C k 1 , C k 2 , . . .. When using i2b2 or UMLS knowledge concepts individually, we experiment with \u03b1 k , \u03b1 c values between (0, 1] at intervals of .1, and we found that \u03b1 k = .2 and \u03b1 c = 1.0 performs best over the dev set. When using i2b2 and UMLS concepts together, we found that assigning more weight (\u03b1 k = .5) to the broader i2b2 concepts than the UMLS concepts (\u03b1 k = .2) performs best over the dev set. When training our model on OntoNotes, we do not have the same knowledge lexicon available, so effectively, we have \u03b1 k = 0 until we begin training on the i2b2 data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Setup", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "For the concept identification auxiliary task, we use only i2b2 concepts for our knowledge lexicon, since using the fine-grained UMLS concepts would induce 3,500 class labels. Additionally, since the i2b2 knowledge lexicon is not available for OntoNotes, we ignore SL (\u03b2 3 = 0).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Setup", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "For our model, we choose max_seq_length of 512, BERT learning rate of 2e\u22125, and task specific learning rates of 1e \u2212 4. Similar to Joshi et al. (2020), we fine-tune 20 epochs for OntoNotes and the i2b2 training examples.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Setup", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "For successful domain adaptation, our model aims to demonstrate the value of incorporating concept knowledge. We evaluate overall coreference performance improvement as a result of the span retrofitting and auxiliary concept identification task in \u00a74.1. Then, we inspect whether representations for highly domain-specific spans are better for the coreference resolution task in our model than in the baseline ( \u00a74.2).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "In Table 2 , we see that combining both of the losses we introduce in this work (CL + RL + SL) improves the model precision by 2.23% resulting in a .8% improvement in the F-1 score.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 3, |
| "end": 10, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Coreference Performance", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Combining the retrofitting loss and the auxiliary scaffolding loss performs better than using each individually. It is possible that adding the scaffolding loss alone is not as helpful because it does not contain the UMLS knowledge.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Coreference Performance", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "While our model does improve precision and the overall F-1 score, recall largely remains constant. The retrofitting loss pushes unrelated spans belonging to different concepts further apart, and consequently we penalize the detection of any valid mention that does not appear in our concept knowledge. Incompleteness of our concept knowledge may be a contributing factor to the lack of recall improvement.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Coreference Performance", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "It is expected that precision should be affected most by the additional losses SL and RL, since both are designed to integrate knowledge in the pairwise relationships between spans.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Coreference Performance", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "The scaffolding loss guides the model to be able to distinguish spans belonging to different concepts from the span representation, and the retrofitting loss enforces a knowledge-based distance function between spans. Model recall is partly determined by a unary mention score used to identify candidate mentions from spans in our model. This mention score is impacted by our losses, since span representation is taken as input to the scoring function. However, the pairwise knowledge integrated into the span representation is much more useful for selecting an antecedent among a set of candidates for a given span. Consequently, since our loss functions have a bigger impact on the antecedent scoring function, then they will also have a bigger impact on model precision.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Coreference Performance", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "In addition to overall performance, we are interested in comparing performance on rare spans that do not occur or occur infrequently in the source domain. Specifically, we are interested in addressing performance degradation that can occur as a result of wordpiece tokenization.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Performance on Domain-specific Spans", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "For example, consider a coreference cluster containing spans that are on average subdivided many times. This is an indication that the vocabulary of spans in the cluster is out of domain. The attention weighted vector over the tokens in the spans may be a misleading representation since the token embeddings correspond to shorter, less meaningful subwords. We report in Table 6 some examples of span pairs that the baseline model incorrectly predicts as coreferent.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 371, |
| "end": 378, |
| "text": "Table 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Performance on Domain-specific Spans", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Consider the spans \"open laparoscopy\" and \"exploratory laporotomy\". Their tokenizations would include the subwords [\"lap\", \"##aro\", \"##tom\", \"##y\"] and [\"lap\", \"##aro\", \"##sco\", \"##py\"]. This overlap of the first few subwords might lead the baseline to conclude that the spans are similar in non-contextual meaning and consequently coreferent. From our fine-grained UMLS knowledge, we know that laparoscopy and laparotomy belong to distinct concepts. Our loss functions RL, SL guide the model to produce disparate span representations since the spans map to distinct concepts. Our model uses knowledge to learn to upweight the wordpieces that are meaningful in context of the target domain (e.g., the suffix tokens in this example). We report examples of challenging span pairs that our model identifies and the baseline fails to identify in Table 5 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 842, |
| "end": 849, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Performance on Domain-specific Spans", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "To collect quantitative evidence for our model's performance on the most challenging spans, we evaluate how model performance changes as the average number of wordpieces per span increases in Table 4 . We can observe that as the average number of wordpieces per span increases beyond 3.4, we start to see an increasing F-1 performance over the baseline. Similar to the overall results for the entire test set, we see that performance improvements in precision are largely responsible.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 192, |
| "end": 199, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Performance on Domain-specific Spans", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Beyond average number of wordpieces per span, we can also use the concept labels annotated in the i2b2 dataset to verify the performance of our model on domain-specific spans. We verified that concepts person and treatment are respectively dominated by spans like \"doctor\", \"patient\" \"surgery\", and \"procedure\", which are more likely to appear in the source domain than spans like \"afebrile\", \"basal cell carcinoma\", \"asculation\" from the problem and test concepts. Therefore, the performance improvement in Table 3 for concepts problem and test suggests that our model can outperform the baseline on domain-specific spans.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 508, |
| "end": 515, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Performance on Domain-specific Spans", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We want to visualize how the span representation captures the mention-antecedent relationship for different types of concepts -specifically how consistently information is extracted from the mention span representation to arrive at a predicted antecedent. We randomly select 200 span pairs with a mention-antecedent relationship. For each span pair, we extract the attention weighted vector over the span tokens, which is the same piece of the span used to compute SL, RL. For these 768dimensional vectors, we take the projection vectors from the mention to the antecedent vector. Then, in Figure 2 we transform these projections into a 2-dimensional space using PCA and plot them in R 2 similar to Faruqui et al. (2015) . For the baseline model (top), most of the mention-antecedent vectors share a similar direction regardless of concept type. However, for the CF + RL + SL model (bottom), there is a clear distinction in mention-antecedent vectors for each concept. This suggests that our model is able to construct a span representation that can capture the mention-antecedent relationship in a way that is (1) specific to the concept (2) consistent across all mention-antecedent pairs belonging to the concept. ", |
| "cite_spans": [ |
| { |
| "start": 699, |
| "end": 720, |
| "text": "Faruqui et al. (2015)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 590, |
| "end": 598, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Visualization", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Prior to the introduction of end-to-end neural models in Lee et al. (2017) , coreference resolution for the clinical domain used pipelined approaches, allowing for the propagation of errors from other core NLU tasks and relying on hand-crafted rules for Model Losses Metric Avg. # of wordpieces/span range [0.0,1.7) [1.7,3.4) [3.4,5.1) [5.1,6.8) [6.8,8.5) Baseline ( Table 4 : We take the set of coreference chains s.t. the average number of wordpieces per span falls withing the range, and evaluate model of the subset. We observe that our model outperforms the baseline coreference chains with spans that are subdivided more frequently TP Coreferent Span Pair Examples Hereafter, wife noted development of left sided weakness, facial droop, slurring of speech . . . with past medical history of atrial fibrillation on coumadin, coronary artery disease, hyperlipidemia, dementia with sudden onset left sided weakness, dysarthria evaluation and treatment of adenocarcinoma involving the transverse colon and gallbladder . . . DISCHARGE DIAG-NOSIS: 1. Metastatic gallbladder cancer He is status post a hemiarthroplasty on 10/17/97 . . . decreased hematocrit prior to his humeral fixation surgery An angiogram was done which disclosed possible subsegmental pulmonary emboli of the upper lobes as well . . . patient was bolused with intravenous heparin due to concern for pulmonary embolism Table 5 : Examples of coreferent span pairs missed by Baseline (CL), identified by our model (CL + RL + SL). In these cases, we can see that wordpiece tokenization is likely misleading the baseline model, since the spans in each pair have few wordpieces in common.", |
| "cite_spans": [ |
| { |
| "start": 57, |
| "end": 74, |
| "text": "Lee et al. (2017)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 316, |
| "end": 355, |
| "text": "[1.7,3.4) [3.4,5.1) [5.1,6.8) [6.8,8.5)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 367, |
| "end": 374, |
| "text": "Table 4", |
| "ref_id": null |
| }, |
| { |
| "start": 1388, |
| "end": 1395, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "She underwent an open laparoscopy . . . The patient is now admitted for exploratory laparotomy Right heart catheterization and coronary angiography on October 15 . . . urgently transferred by Dr. Lenni Factor for possible angioplasty 78-yo male with atrial fibrillation. . . Mechanical mitral valve: Anticoagulation was reversed He had a cardiac catheterization performed which revealed . . . management after this hospitalization and has done very well Table 6 : Examples of non-coreferent span pairs correctly missed our model (CL + RL + SL), but identified by Baseline (CL). In these cases, we can see that wordpiece tokenization is likely misleading the baseline model, since the spans in each pair have wordpieces in common.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 454, |
| "end": 461, |
| "text": "Table 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "FP Non-coreferent Span Pair Examples", |
| "sec_num": null |
| }, |
| { |
| "text": "resolving each type of entity in the domain (Jindal et al., 2014) . Feature-based methods like (Jindal et al., 2014) using knowledge in the coreference model rely on the availability of knowledge at test time. We are focused instead on the case where there may be no concept knowledge available when our model is deployed.", |
| "cite_spans": [ |
| { |
| "start": 44, |
| "end": 65, |
| "text": "(Jindal et al., 2014)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 95, |
| "end": 116, |
| "text": "(Jindal et al., 2014)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "FP Non-coreferent Span Pair Examples", |
| "sec_num": null |
| }, |
| { |
| "text": "There has been some limited work in domain adaptation for coreference resolution. Yang et al. (2012) adapts a model trained on the MUC-6 and ACE 2005 datasets to the biomedical domain using an active learning approach, applying data augmentation and pruning techniques. Zhao and Ng (2014) propose a feature-based active learning method to learn cross-domain knowledge. Unlike these works, we take advantage of the modern expressive power of the SpanBERT representation. With the introduction of SpanBERT, there was a marked performance improvement for several NLU tasks including coreference resolution. Joshi et al. (2020) showed that SpanBERT could be fine-tuned to perform well on several datasets, e.g., GLU and ACE (Wang et al., 2018; Doddington et al., 2004) .", |
| "cite_spans": [ |
| { |
| "start": 82, |
| "end": 100, |
| "text": "Yang et al. (2012)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 270, |
| "end": 288, |
| "text": "Zhao and Ng (2014)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 604, |
| "end": 623, |
| "text": "Joshi et al. (2020)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 720, |
| "end": 739, |
| "text": "(Wang et al., 2018;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 740, |
| "end": 764, |
| "text": "Doddington et al., 2004)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "FP Non-coreferent Span Pair Examples", |
| "sec_num": null |
| }, |
| { |
| "text": "However, Lu and Ng (2020) found that Span-BERT coreference resolvers generally rely more on mentions than context, so they are susceptible to small perturbations (e.g., changing all the names/nominal mentions in the test set). More generally, for NLU tasks several studies have found that neural models rely heavily on shallow heuristics in the text rather than learning the underlying structure of the linguistic phenomenon (Peng et al., 2020; Rosenman et al., 2020) , leading to the misinterpretation of context (Alt et al., 2020) .", |
| "cite_spans": [ |
| { |
| "start": 9, |
| "end": 25, |
| "text": "Lu and Ng (2020)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 425, |
| "end": 444, |
| "text": "(Peng et al., 2020;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 445, |
| "end": 467, |
| "text": "Rosenman et al., 2020)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 514, |
| "end": 532, |
| "text": "(Alt et al., 2020)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "FP Non-coreferent Span Pair Examples", |
| "sec_num": null |
| }, |
| { |
| "text": "This poses a challenge for adapting a coreference model to noisy domains like clinical text and mostly OOD spans. With the recent success of finetuning for domain adaptation, a natural approach would be to fine-tune the SpanBERT representation, the coreference model, or both (Gururangan et al., 2020) . Fine-tuning the pretrained SpanBERT method alone can be expensive -Gururangan et al. (2020) showed that the best performing scheme requires 180K documents. In settings where there are fewer documents available in the target domain, it is still possible to fine-tune a coreference model with SpanBERT (Joshi et al., 2020) .", |
| "cite_spans": [ |
| { |
| "start": 276, |
| "end": 301, |
| "text": "(Gururangan et al., 2020)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 604, |
| "end": 624, |
| "text": "(Joshi et al., 2020)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "FP Non-coreferent Span Pair Examples", |
| "sec_num": null |
| }, |
| { |
| "text": "However, a persisting challenge with adapting the span representation is associated with the wordpiece tokenization employed by SpanBERT. For highly technical domain-specific language, it is natural that there is a higher average number of subwords per span, since it is unlikely that many spans or spans split only once belong to the limited 300,000 word SpanBERT vocabulary. Poerner et al. (2020a) show that wordpiece tokenization in the biomedical domain can lead to misleading span representations. Purely fine-tuning approaches fail to address this issue, since the SpanBERT vocabulary is constant.", |
| "cite_spans": [ |
| { |
| "start": 377, |
| "end": 399, |
| "text": "Poerner et al. (2020a)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "FP Non-coreferent Span Pair Examples", |
| "sec_num": null |
| }, |
| { |
| "text": "At the same time, SpanBERT implicitly learns a geometry that encodes rich information related to the coreference task. Hewitt and Manning (2019) show that it is possible to learn a linear projection space from BERT embeddings to capture linguistic phenomena like syntactic dependencies. In Kahardipraja et al. (2020) , the authors use a Feedforward Neural Network (FFNN) to probe for properties of coreference structure, finding evidence that the SpanBERT representation can be used to predict coreference arcs and the head word of the span with >90% F1. Therefore, it is likely that overall performance greatly depends on the quality of span representation.", |
| "cite_spans": [ |
| { |
| "start": 119, |
| "end": 144, |
| "text": "Hewitt and Manning (2019)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 290, |
| "end": 316, |
| "text": "Kahardipraja et al. (2020)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "FP Non-coreferent Span Pair Examples", |
| "sec_num": null |
| }, |
| { |
| "text": "In our work, we develop multiple techniques to adapt the span representation to a new domain with concept knowledge, allowing the model to be fine-tuned with fewer target domain examples and perform better on highly domain-specific entities. In particular, by incorporating knowledge into the span representation, we are able to restore a global, non-contextual meaning to excessively subtokenized spans.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "FP Non-coreferent Span Pair Examples", |
| "sec_num": null |
| }, |
| { |
| "text": "We present methods to efficiently adapt coreference resolution models to a new domain using domainspecific concept knowledge. We demonstrate that we can integrate knowledge into the span representation using two losses to (1) retrofit the span representation to the concept knowledge and (2) ensure that knowledge can be recovered from the span representation using an auxiliary concept identification task. Using these methods, we are able to improve the performance of our baseline, especially for highly domain-specific spans.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Code publicly available at https://github.com/ nupoorgandhi/i2b2-coref-public", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "Thanks to Alex Chouldechova, Amanda Coston, David Steier, and the Allegheny County Department of Human Services for valuable feedback on this work. This work is supported by the Block Center for Technology and Innovation, and A.F. is supported by a Google PhD Fellowship.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Probing linguistic features of sentence-level representations in neural relation extraction", |
| "authors": [ |
| { |
| "first": "Christoph", |
| "middle": [], |
| "last": "Alt", |
| "suffix": "" |
| }, |
| { |
| "first": "Aleksandra", |
| "middle": [], |
| "last": "Gabryszak", |
| "suffix": "" |
| }, |
| { |
| "first": "Leonhard", |
| "middle": [], |
| "last": "Hennig", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1534--1545", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.140" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christoph Alt, Aleksandra Gabryszak, and Leonhard Hennig. 2020. Probing linguistic features of sentence-level representations in neural relation ex- traction. In Proceedings of the 58th Annual Meet- ing of the Association for Computational Linguistics, pages 1534-1545, Online. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "The unified medical language system (umls): integrating biomedical terminology", |
| "authors": [ |
| { |
| "first": "Olivier", |
| "middle": [], |
| "last": "Bodenreider", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Nucleic acids research", |
| "volume": "32", |
| "issue": "suppl_1", |
| "pages": "267--270", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Olivier Bodenreider. 2004. The unified medical lan- guage system (umls): integrating biomedical termi- nology. Nucleic acids research, 32(suppl_1):D267- D270.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "The automatic content extraction (ace) program-tasks, data, and evaluation", |
| "authors": [ |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "George R Doddington", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mitchell", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Mark", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Przybocki", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Lance", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephanie", |
| "middle": [ |
| "M" |
| ], |
| "last": "Ramshaw", |
| "suffix": "" |
| }, |
| { |
| "first": "Ralph", |
| "middle": [ |
| "M" |
| ], |
| "last": "Strassel", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Weischedel", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Lrec", |
| "volume": "2", |
| "issue": "", |
| "pages": "837--840", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "George R Doddington, Alexis Mitchell, Mark A Przy- bocki, Lance A Ramshaw, Stephanie M Strassel, and Ralph M Weischedel. 2004. The automatic content extraction (ace) program-tasks, data, and evaluation. In Lrec, volume 2, pages 837-840. Lisbon.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Retrofitting word vectors to semantic lexicons", |
| "authors": [ |
| { |
| "first": "Manaal", |
| "middle": [], |
| "last": "Faruqui", |
| "suffix": "" |
| }, |
| { |
| "first": "Jesse", |
| "middle": [], |
| "last": "Dodge", |
| "suffix": "" |
| }, |
| { |
| "first": "Sujay", |
| "middle": [], |
| "last": "Kumar Jauhar", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Eduard", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "1606--1615", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/v1/N15-1184" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Manaal Faruqui, Jesse Dodge, Sujay Kumar Jauhar, Chris Dyer, Eduard Hovy, and Noah A. Smith. 2015. Retrofitting word vectors to semantic lexicons. In Proceedings of the 2015 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies, pages 1606-1615, Denver, Colorado. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Don't stop pretraining: Adapt language models to domains and tasks", |
| "authors": [ |
| { |
| "first": "Ana", |
| "middle": [], |
| "last": "Suchin Gururangan", |
| "suffix": "" |
| }, |
| { |
| "first": "Swabha", |
| "middle": [], |
| "last": "Marasovi\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyle", |
| "middle": [], |
| "last": "Swayamdipta", |
| "suffix": "" |
| }, |
| { |
| "first": "Iz", |
| "middle": [], |
| "last": "Lo", |
| "suffix": "" |
| }, |
| { |
| "first": "Doug", |
| "middle": [], |
| "last": "Beltagy", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Downey", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "8342--8360", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.740" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Suchin Gururangan, Ana Marasovi\u0107, Swabha Swayamdipta, Kyle Lo, Iz Beltagy, Doug Downey, and Noah A. Smith. 2020. Don't stop pretraining: Adapt language models to domains and tasks. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 8342-8360, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "A structural probe for finding syntax in word representations", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Hewitt", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Christopher", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4129--4138", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John Hewitt and Christopher D Manning. 2019. A structural probe for finding syntax in word represen- tations. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, Volume 1 (Long and Short Papers), pages 4129-4138.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Joint inference for end-to-end coreference resolution for clinical notes", |
| "authors": [ |
| { |
| "first": "Prateek", |
| "middle": [], |
| "last": "Jindal", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Roth", |
| "suffix": "" |
| }, |
| { |
| "first": "Carl A", |
| "middle": [], |
| "last": "Gunter", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 5th ACM Conference on Bioinformatics, Computational Biology, and Health Informatics", |
| "volume": "", |
| "issue": "", |
| "pages": "192--201", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Prateek Jindal, Dan Roth, and Carl A Gunter. 2014. Joint inference for end-to-end coreference resolution for clinical notes. In Proceedings of the 5th ACM Conference on Bioinformatics, Computational Biol- ogy, and Health Informatics, pages 192-201.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Spanbert: Improving pre-training by representing and predicting spans", |
| "authors": [ |
| { |
| "first": "Mandar", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Yinhan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Daniel", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Weld", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "8", |
| "issue": "", |
| "pages": "64--77", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mandar Joshi, Danqi Chen, Yinhan Liu, Daniel S Weld, Luke Zettlemoyer, and Omer Levy. 2020. Spanbert: Improving pre-training by representing and predict- ing spans. Transactions of the Association for Com- putational Linguistics, 8:64-77.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "BERT for coreference resolution: Baselines and analysis", |
| "authors": [ |
| { |
| "first": "Mandar", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Weld", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "5803--5808", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D19-1588" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mandar Joshi, Omer Levy, Luke Zettlemoyer, and Daniel Weld. 2019. BERT for coreference reso- lution: Baselines and analysis. In Proceedings of the 2019 Conference on Empirical Methods in Nat- ural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 5803-5808, Hong Kong, China. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Exploring span representations in neural coreference resolution", |
| "authors": [ |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Kahardipraja", |
| "suffix": "" |
| }, |
| { |
| "first": "Olena", |
| "middle": [], |
| "last": "Vyshnevska", |
| "suffix": "" |
| }, |
| { |
| "first": "Sharid", |
| "middle": [], |
| "last": "Lo\u00e1iciga", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the First Workshop on Computational Approaches to Discourse", |
| "volume": "", |
| "issue": "", |
| "pages": "32--41", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Patrick Kahardipraja, Olena Vyshnevska, and Sharid Lo\u00e1iciga. 2020. Exploring span representations in neural coreference resolution. In Proceedings of the First Workshop on Computational Approaches to Discourse, pages 32-41.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Coreference resolution without span representations", |
| "authors": [ |
| { |
| "first": "Yuval", |
| "middle": [], |
| "last": "Kirstain", |
| "suffix": "" |
| }, |
| { |
| "first": "Ori", |
| "middle": [], |
| "last": "Ram", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing", |
| "volume": "2", |
| "issue": "", |
| "pages": "14--19", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2021.acl-short.3" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yuval Kirstain, Ori Ram, and Omer Levy. 2021. Coref- erence resolution without span representations. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Lan- guage Processing (Volume 2: Short Papers), pages 14-19, Online. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "End-to-end neural coreference resolution", |
| "authors": [ |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Luheng", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "188--197", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D17-1018" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kenton Lee, Luheng He, Mike Lewis, and Luke Zettle- moyer. 2017. End-to-end neural coreference reso- lution. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 188-197, Copenhagen, Denmark. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Conundrums in entity reference resolution", |
| "authors": [ |
| { |
| "first": "Jing", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "Vincent", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "6620--6631", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jing Lu and Vincent Ng. 2020. Conundrums in entity reference resolution. In Proceedings of the 2020 Conference on Empirical Methods in Natural Lan- guage Processing (EMNLP), pages 6620-6631.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Learning from Context or Names? An Empirical Study on Neural Relation Extraction", |
| "authors": [ |
| { |
| "first": "Hao", |
| "middle": [], |
| "last": "Peng", |
| "suffix": "" |
| }, |
| { |
| "first": "Tianyu", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Xu", |
| "middle": [], |
| "last": "Han", |
| "suffix": "" |
| }, |
| { |
| "first": "Yankai", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Peng", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhiyuan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Maosong", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Jie", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "3661--3672", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.emnlp-main.298" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hao Peng, Tianyu Gao, Xu Han, Yankai Lin, Peng Li, Zhiyuan Liu, Maosong Sun, and Jie Zhou. 2020. Learning from Context or Names? An Empirical Study on Neural Relation Extraction. In Proceed- ings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 3661-3672, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "E-BERT: Efficient-yet-effective entity embeddings for BERT", |
| "authors": [ |
| { |
| "first": "Nina", |
| "middle": [], |
| "last": "Poerner", |
| "suffix": "" |
| }, |
| { |
| "first": "Ulli", |
| "middle": [], |
| "last": "Waltinger", |
| "suffix": "" |
| }, |
| { |
| "first": "Hinrich", |
| "middle": [], |
| "last": "Sch\u00fctze", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Findings of the Association for Computational Linguistics: EMNLP 2020", |
| "volume": "", |
| "issue": "", |
| "pages": "803--818", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.findings-emnlp.71" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nina Poerner, Ulli Waltinger, and Hinrich Sch\u00fctze. 2020a. E-BERT: Efficient-yet-effective entity em- beddings for BERT. In Findings of the Associa- tion for Computational Linguistics: EMNLP 2020, pages 803-818, Online. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Inexpensive domain adaptation of pretrained language models: Case studies on biomedical NER and covid-19 QA", |
| "authors": [ |
| { |
| "first": "Nina", |
| "middle": [], |
| "last": "Poerner", |
| "suffix": "" |
| }, |
| { |
| "first": "Ulli", |
| "middle": [], |
| "last": "Waltinger", |
| "suffix": "" |
| }, |
| { |
| "first": "Hinrich", |
| "middle": [], |
| "last": "Sch\u00fctze", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Findings of the Association for Computational Linguistics: EMNLP 2020", |
| "volume": "", |
| "issue": "", |
| "pages": "1482--1490", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.findings-emnlp.134" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nina Poerner, Ulli Waltinger, and Hinrich Sch\u00fctze. 2020b. Inexpensive domain adaptation of pretrained language models: Case studies on biomedical NER and covid-19 QA. In Findings of the Association for Computational Linguistics: EMNLP 2020, pages 1482-1490, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Exposing Shallow Heuristics of Relation Extraction Models with Challenge Data", |
| "authors": [ |
| { |
| "first": "Alon", |
| "middle": [], |
| "last": "Shachar Rosenman", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Jacovi", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Goldberg", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "3702--3710", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.emnlp-main.302" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shachar Rosenman, Alon Jacovi, and Yoav Goldberg. 2020. Exposing Shallow Heuristics of Relation Ex- traction Models with Challenge Data. In Proceed- ings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 3702-3710, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Noisy text data: Achilles' heel of bert", |
| "authors": [ |
| { |
| "first": "Ankit", |
| "middle": [], |
| "last": "Srivastava", |
| "suffix": "" |
| }, |
| { |
| "first": "Piyush", |
| "middle": [], |
| "last": "Makhija", |
| "suffix": "" |
| }, |
| { |
| "first": "Anuj", |
| "middle": [], |
| "last": "Gupta", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Sixth Workshop on Noisy Usergenerated Text (W-NUT 2020)", |
| "volume": "", |
| "issue": "", |
| "pages": "16--21", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ankit Srivastava, Piyush Makhija, and Anuj Gupta. 2020. Noisy text data: Achilles' heel of bert. In Proceedings of the Sixth Workshop on Noisy User- generated Text (W-NUT 2020), pages 16-21.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Syntactic scaffolds for semantic structures", |
| "authors": [ |
| { |
| "first": "Swabha", |
| "middle": [], |
| "last": "Swayamdipta", |
| "suffix": "" |
| }, |
| { |
| "first": "Sam", |
| "middle": [], |
| "last": "Thomson", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "3772--3782", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D18-1412" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Swabha Swayamdipta, Sam Thomson, Kenton Lee, Luke Zettlemoyer, Chris Dyer, and Noah A. Smith. 2018. Syntactic scaffolds for semantic structures. In Proceedings of the 2018 Conference on Em- pirical Methods in Natural Language Processing, pages 3772-3782, Brussels, Belgium. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "2010 i2b2/VA challenge on concepts, assertions, and relations in clinical text", |
| "authors": [ |
| { |
| "first": "\u00d6zlem", |
| "middle": [], |
| "last": "Uzuner", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Brett", |
| "suffix": "" |
| }, |
| { |
| "first": "Shuying", |
| "middle": [], |
| "last": "South", |
| "suffix": "" |
| }, |
| { |
| "first": "Scott L", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Duvall", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Journal of the American Medical Informatics Association", |
| "volume": "18", |
| "issue": "5", |
| "pages": "552--556", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "\u00d6zlem Uzuner, Brett R South, Shuying Shen, and Scott L DuVall. 2011. 2010 i2b2/va challenge on concepts, assertions, and relations in clinical text. Journal of the American Medical Informatics Asso- ciation, 18(5):552-556.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "GLUE: A multi-task benchmark and analysis platform for natural language understanding", |
| "authors": [ |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Amanpreet", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "Julian", |
| "middle": [], |
| "last": "Michael", |
| "suffix": "" |
| }, |
| { |
| "first": "Felix", |
| "middle": [], |
| "last": "Hill", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel", |
| "middle": [], |
| "last": "Bowman", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 EMNLP Workshop Black-boxNLP: Analyzing and Interpreting Neural Networks for NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "353--355", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W18-5446" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alex Wang, Amanpreet Singh, Julian Michael, Fe- lix Hill, Omer Levy, and Samuel Bowman. 2018. GLUE: A multi-task benchmark and analysis plat- form for natural language understanding. In Pro- ceedings of the 2018 EMNLP Workshop Black- boxNLP: Analyzing and Interpreting Neural Net- works for NLP, pages 353-355, Brussels, Belgium. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Moving on from ontonotes: Coreference resolution model transfer", |
| "authors": [ |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Xia", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Van Durme", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2104.08457" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Patrick Xia and Benjamin Van Durme. 2021. Moving on from ontonotes: Coreference resolution model transfer. arXiv preprint arXiv:2104.08457.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Revealing the myth of higher-order inference in coreference resolution", |
| "authors": [ |
| { |
| "first": "Liyan", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Jinho", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "8527--8533", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.emnlp-main.686" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Liyan Xu and Jinho D. Choi. 2020. Revealing the myth of higher-order inference in coreference resolution. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 8527-8533, Online. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Domain adaptation for coreference resolution: An adaptive ensemble approach", |
| "authors": [ |
| { |
| "first": "Jian Bo", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Qi", |
| "middle": [], |
| "last": "Mao", |
| "suffix": "" |
| }, |
| { |
| "first": "Qiao", |
| "middle": [], |
| "last": "Liang Xiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivor Wai-Hung", |
| "middle": [], |
| "last": "Tsang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kian", |
| "middle": [], |
| "last": "Ming", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Chai", |
| "suffix": "" |
| }, |
| { |
| "first": "Hai", |
| "middle": [ |
| "Leong" |
| ], |
| "last": "Chieu", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "744--753", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jian Bo Yang, Qi Mao, Qiao Liang Xiang, Ivor Wai- Hung Tsang, Kian Ming A Chai, and Hai Leong Chieu. 2012. Domain adaptation for coreference res- olution: An adaptive ensemble approach. In Pro- ceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Com- putational Natural Language Learning, pages 744- 753.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Domain adaptation with active learning for coreference resolution", |
| "authors": [ |
| { |
| "first": "Shanheng", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Hwee Tou", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 5th International Workshop on Health Text Mining and Information Analysis (Louhi)", |
| "volume": "", |
| "issue": "", |
| "pages": "21--29", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shanheng Zhao and Hwee Tou Ng. 2014. Domain adaptation with active learning for coreference reso- lution. In Proceedings of the 5th International Work- shop on Health Text Mining and Information Analy- sis (Louhi), pages 21-29.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "type_str": "figure", |
| "num": null, |
| "text": "Span representations" |
| }, |
| "FIGREF1": { |
| "uris": null, |
| "type_str": "figure", |
| "num": null, |
| "text": "Two-dimensional PCA projections of attentionweighted span tokens for vector pairs holding the mentionantecedent relation for the baseline (top) and our model CL + SL + RL (bottom). Our model uses concept knowledge to construct a span representation that more consistently captures the mention-antecedent relationship specific to each concept." |
| }, |
| "TABREF0": { |
| "html": null, |
| "num": null, |
| "content": "<table><tr><td>i2b2 Concept</td><td colspan=\"4\">Avg. Chain length # of Chains Train Test Train Test</td></tr><tr><td>Problem</td><td>2.96</td><td>2.9</td><td colspan=\"2\">1704 1186</td></tr><tr><td>Test</td><td>2.31</td><td>2.51</td><td>568</td><td>360</td></tr><tr><td>Person</td><td>14.16</td><td>12.54</td><td>754</td><td>571</td></tr><tr><td>Treatment</td><td>2.66</td><td>2.63</td><td colspan=\"2\">1262 1063</td></tr></table>", |
| "text": "", |
| "type_str": "table" |
| }, |
| "TABREF1": { |
| "html": null, |
| "num": null, |
| "content": "<table><tr><td>Model Losses</td><td>Knowledge</td><td/><td>MUC</td><td/><td/><td colspan=\"4\">Model Performance B-cubed CEAFE</td><td/><td/><td>averages</td></tr><tr><td/><td/><td>R</td><td>P</td><td>F-1</td><td>R</td><td>P</td><td>F-1</td><td>R</td><td>P</td><td>F-1</td><td>R</td><td>P</td><td>F-1</td></tr><tr><td>Baseline (CL)</td><td>NA</td><td>70.93</td><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr></table>", |
| "text": "72.51 71.71 64.91 66.48 65.69 54.57 58.44 56.44 63.47 65.81 64.61 CL + RL + SL i2b2, UMLS 71.15 73.64 72.37 65.03 67.59 66.28 54.77 60.64 57.56 63.65 68.04 65.41 CL + RL i2b2, UMLS 70.66 73.88 72.23 64.36 67.89 66.06 54.39 60.38 57.22 63.14 67.28 65.17 CL + SL i2b2 70.28 74.14 72.16 64.22 68.27 66.18 54.69 60.43 57.41 63.43 67.17 65.24", |
| "type_str": "table" |
| }, |
| "TABREF2": { |
| "html": null, |
| "num": null, |
| "content": "<table><tr><td>Model Losses</td><td>Metric</td><td colspan=\"4\">i2b2 Concepts Person Problem Treatment Test</td></tr><tr><td/><td>Avg. R</td><td>63.47</td><td>50.92</td><td>54.85</td><td>48.08</td></tr><tr><td>Baseline (CL)</td><td>Avg. P</td><td>83.76</td><td>79.98</td><td>82.98</td><td>84.43</td></tr><tr><td/><td colspan=\"2\">Avg. F-1 69.92</td><td>58.0</td><td>62.24</td><td>54.74</td></tr><tr><td/><td>Avg. R</td><td>62.66</td><td>51.98</td><td>53.17</td><td>48.67</td></tr><tr><td>CL + RL + SL</td><td>Avg. P</td><td>86.23</td><td>83.20</td><td>86.22</td><td>86.67</td></tr><tr><td/><td colspan=\"2\">Avg. F-1 69.98</td><td>59.56</td><td>61.84</td><td>55.62</td></tr></table>", |
| "text": "Overall coreference performance for various combinations of loss functions and knowledge resources. Our model surpasses our Baseline (CL) largely as a result of an improvement in precision (scores averaged over 6 random seed initializations).", |
| "type_str": "table" |
| }, |
| "TABREF3": { |
| "html": null, |
| "num": null, |
| "content": "<table/>", |
| "text": "Performance on coreference chains belonging to a specific concept. Our model outperforms the baseline on more domain-specific spans, indicating our model improves domain adaptation for problem and test coreference clusters", |
| "type_str": "table" |
| } |
| } |
| } |
| } |